// JSDOC/TokenReader.vala
// [gnome.introspection-doc-generator] / JSDOC / TokenReader.vala
//<script type="text/javascript">


//const Token   = imports.Token.Token;
//const Lang    = imports.Lang.Lang;

/**
        @class Search a {@link JSDOC.TextStream} for language tokens.
*/
12
13 namespace JSDOC {
14
15     public class TokenArray: Object {
16         
17         public Gee.ArrayList<Token> tokens;
18         public int length {
19             get { return this.tokens.size }
20         }
21         
22         public TokenArray()
23         {
24             this.items = new Gee.ArrayList<Token>();
25         }
26         
27         public Token? last() {
28             if (this.tokens > 0) {
29                 return this.tokens[this.tokens.length-1];
30             }
31             return null;
32         }
33         public Token? lastSym () {
34             for (var i = this.tokens.length-1; i >= 0; i--) {
35                 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
36                     return this.tokens.get(i);
37                 }
38             }
39             return null;
40         }
41         public void push (Token t) {
42             this.tokens.add(t);
43         }
44         public Token get(int i) {
45             return this.tokens.get(i);
46         }
47     }
48
49
50     public class TokenReader : Object
51     {
52         
53         
54         
55         /*
56          *
57          * I wonder if this will accept the prop: value, prop2 :value construxtor if we do not define one...
58          */
59         
60         /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
61         public bool collapseWhite = false, // only reduces white space...
62         /** @cfg {Boolean} keepDocs keep JSDOC comments **/
63         public bool keepDocs = true,
64         /** @cfg {Boolean} keepWhite keep White space **/
65         public bool keepWhite = false,
66         /** @cfg {Boolean} keepComments  keep all comments **/
67         public bool keepComments = false,
68         /** @cfg {Boolean} sepIdents seperate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
69         public bool sepIdents = false,
70         /** @cfg {String} filename name of file being parsed. **/
71         public string filename = "";
72         /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
73         public bool ignoreBadGrammer = false,
74         
75         
76         int line = 0;
77         
78         /**
79          * tokenize a stream
80          * @return {Array} of tokens
81          * 
82          * ts = new TextStream(File.read(str));
83          * tr = TokenReader({ keepComments : true, keepWhite : true });
84          * tr.tokenize(ts)
85          * 
86          */
87         public TokenArray tokenize(TextStream stream)
88         {
89             this.line =1;
90             var tokens = new TokenArray();
91            
92             bool eof;
93             while (!stream.lookEOF()) {
94                 
95                 
96                 if (this.read_mlcomment(stream, tokens)) continue;
97                 if (this.read_slcomment(stream, tokens)) continue;
98                 if (this.read_dbquote(stream, tokens))   continue;
99                 if (this.read_snquote(stream, tokens))   continue;
100                 if (this.read_regx(stream, tokens))      continue;
101                 if (this.read_numb(stream, tokens))      continue;
102                 if (this.read_punc(stream, tokens))      continue;
103                 if (this.read_newline(stream, tokens))   continue;
104                 if (this.read_space(stream, tokens))     continue;
105                 if (this.read_word(stream, tokens))      continue;
106                 
107                 // if execution reaches here then an error has happened
108                 tokens.push(
109                         new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
110                 );
111             }
112             
113             
114             
115             return tokens;
116         }
117
118         /**
119          * findPuncToken - find the id of a token (previous to current)
120          * need to back check syntax..
121          * 
122          * @arg {Array} tokens the array of tokens.
123          * @arg {String} token data (eg. '(')
124          * @arg {Number} offset where to start reading from
125          * @return {Number} position of token
126          */
127         public int findPuncToken(TokenArray tokens, string data, int n)
128         {
129             n = n || tokens.length -1;
130             var stack = 0;
131             while (n > -1) {
132                 
133                 if (!stack && tokens.get(n).data == data) {
134                     return n;
135                 }
136                 
137                 if (tokens.get(n).data  == ')' || tokens.get(n).data  == '}') {
138                     stack++;
139                     n--;
140                     continue;
141                 }
142                 if (stack && (tokens.get(n).data  == '{' || tokens.get(n).data  == '(')) {
143                     stack--;
144                     n--;
145                     continue;
146                 }
147                 
148                 
149                 n--;
150             }
151             return -1;
152         }
153         /**
154          * lastSym - find the last token symbol
155          * need to back check syntax..
156          * 
157          * @arg {Array} tokens the array of tokens.
158          * @arg {Number} offset where to start..
159          * @return {Token} the token
160          */
161         public Token lastSym(TokenArray tokens, int n)
162         {
163             for (var i = n-1; i >= 0; i--) {
164                 if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
165                     return tokens.get(i);
166                 }
167             }
168             return null;
169         }
170         
171          
172         
173         /**
174             @returns {Boolean} Was the token found?
175          */
176         public bool read_word (TokenStream stream, TokenArray tokens)
177         {
178             string found = "";
179             while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
180                 found += stream.next();
181             }
182             
183             if (found == "") {
184                 return false;
185             }
186             
187             var name = Lang.keyword(found);
188             if (name != null) {
189                 
190                 // look for "()return" ?? why ???
191                 
192                 if (found == "return" && tokens.lastSym().data == ")") {
193                     //Seed.print('@' + tokens.length);
194                     var n = this.findPuncToken(tokens, ")");
195                     //Seed.print(')@' + n);
196                     n = this.findPuncToken(tokens, "(", n-1);
197                     //Seed.print('(@' + n);
198                     
199                     var lt = this.lastSym(tokens, n);
200                     
201                     //print(JSON.stringify(lt));
202                     if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
203                         if (!this.ignoreBadGrammer) {
204                             throw {
205                                 name : "ArgumentError", 
206                                 message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
207                             }
208                         }
209                     }
210                     
211                     
212                     
213                 }
214                 
215                 tokens.push(new Token(found, "KEYW", name, this.line));
216                 return true;
217             }
218             if (!this.sepIdents || found.indexOf('.') < 0 ) {
219                 tokens.push(new Token(found, "NAME", "NAME", this.line));
220                 return true;
221             }
222             var n = found.split('.');
223             var p = false;
224             var _this = this;
225             n.forEach(function(nm) {
226                 if (p) {
227                     tokens.push(new Token('.', "PUNC", "DOT", _this.line));
228                 }
229                 p=true;
230                 tokens.push(new Token(nm, "NAME", "NAME", _this.line));
231             });
232             return true;
233                 
234
235         }
236
237         /**
238             @returns {Boolean} Was the token found?
239          */
240         read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
241             var found = "";
242             var name;
243             while (!stream.look().eof && Lang.punc(found+stream.look())) {
244                 found += stream.next();
245             }
246             
247             
248             if (found === "") {
249                 return false;
250             }
251             
252             if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
253                 //print("Error - comma found before " + found);
254                 //print(JSON.stringify(tokens.lastSym(), null,4));
255                 if (this.ignoreBadGrammer) {
256                     print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
257                 } else {
258                     
259                     throw {
260                         name : "ArgumentError", 
261                         message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
262                     }
263                 }
264             }
265             
266             tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
267             return true;
268             
269         },
270
271         /**
272             @returns {Boolean} Was the token found?
273          */
274         read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
275             var found = "";
276             
277             while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
278                 found += stream.next();
279             }
280             
281             if (found === "") {
282                 return false;
283             }
284             //print("WHITE = " + JSON.stringify(found)); 
285             if (this.collapseWhite) found = " ";
286             if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
287             return true;
288         
289         },
290
291         /**
292             @returns {Boolean} Was the token found?
293          */
294         read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
295             var found = "";
296             var line = this.line;
297             while (!stream.look().eof && Lang.isNewline(stream.look())) {
298                 this.line++;
299                 found += stream.next();
300             }
301             
302             if (found === "") {
303                 return false;
304             }
305             //this.line++;
306             if (this.collapseWhite) {
307                 found = "\n";
308             }
309              if (this.keepWhite) {
310                 var last = tokens ? tokens.pop() : false;
311                 if (last && last.name != "WHIT") {
312                     tokens.push(last);
313                 }
314                 
315                 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
316             }
317             return true;
318         },
319
320         /**
321             @returns {Boolean} Was the token found?
322          */
323         read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
324             if (stream.look() == "/" && stream.look(1) == "*") {
325                 var found = stream.next(2);
326                 var c = '';
327                 var line = this.line;
328                 while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
329                     c = stream.next();
330                     if (c == "\n") this.line++;
331                     found += c;
332                 }
333                 
334                 // to start doclet we allow /** or /*** but not /**/ or /****
335                 if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
336                 else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
337                 return true;
338             }
339             return false;
340         },
341
342         /**
343             @returns {Boolean} Was the token found?
344          */
345         read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
346             var found;
347             if (
348                 (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
349                 || 
350                 (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
351             ) {
352                 var line = this.line;
353                 while (!stream.look().eof && !Lang.isNewline(stream.look())) {
354                     found += stream.next();
355                 }
356                 if (!stream.look().eof) {
357                     found += stream.next();
358                 }
359                 if (this.keepComments) {
360                     tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
361                 }
362                 this.line++;
363                 return true;
364             }
365             return false;
366         },
367
368         /**
369             @returns {Boolean} Was the token found?
370          */
371         read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
372             if (stream.look() == "\"") {
373                 // find terminator
374                 var string = stream.next();
375                 
376                 while (!stream.look().eof) {
377                     if (stream.look() == "\\") {
378                         if (Lang.isNewline(stream.look(1))) {
379                             do {
380                                 stream.next();
381                             } while (!stream.look().eof && Lang.isNewline(stream.look()));
382                             string += "\\\n";
383                         }
384                         else {
385                             string += stream.next(2);
386                         }
387                     }
388                     else if (stream.look() == "\"") {
389                         string += stream.next();
390                         tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
391                         return true;
392                     }
393                     else {
394                         string += stream.next();
395                     }
396                 }
397             }
398             return false; // error! unterminated string
399         },
400
401         /**
402             @returns {Boolean} Was the token found?
403          */
404         read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
405             if (stream.look() == "'") {
406                 // find terminator
407                 var string = stream.next();
408                 
409                 while (!stream.look().eof) {
410                     if (stream.look() == "\\") { // escape sequence
411                         string += stream.next(2);
412                     }
413                     else if (stream.look() == "'") {
414                         string += stream.next();
415                         tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
416                         return true;
417                     }
418                     else {
419                         string += stream.next();
420                     }
421                 }
422             }
423             return false; // error! unterminated string
424         },
425
426         /**
427             @returns {Boolean} Was the token found?
428          */
429         read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
430             if (stream.look() === "0" && stream.look(1) == "x") {
431                 return this.read_hex(stream, tokens);
432             }
433             
434             var found = "";
435             
436             while (!stream.look().eof && Lang.isNumber(found+stream.look())){
437                 found += stream.next();
438             }
439             
440             if (found === "") {
441                 return false;
442             }
443             else {
444                 if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
445                 else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
446                 return true;
447             }
448         },
449         /*t:
450             requires("../lib/JSDOC/TextStream.js");
451             requires("../lib/JSDOC/Token.js");
452             requires("../lib/JSDOC/Lang.js");
453             
454             plan(3, "testing read_numb");
455             
456             //// setup
457             var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
458             var tr = new TokenReader();
459             var tokens = tr.tokenize(new TextStream(src));
460             
461             var hexToken, octToken, decToken;
462             for (var i = 0; i < tokens.length; i++) {
463                 if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
464                 if (tokens[i].name == "OCTAL") octToken = tokens[i];
465                 if (tokens[i].name == "DECIMAL") decToken = tokens[i];
466             }
467             ////
468             
469             is(decToken.data, "8.0", "decimal number is found in source.");
470             is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
471             is(octToken.data, "0777", "octal number is found in source.");
472         */
473
474         /**
475             @returns {Boolean} Was the token found?
476          */
477         read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
478             var found = stream.next(2);
479             
480             while (!stream.look().eof) {
481                 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
482                     tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
483                     return true;
484                 }
485                 else {
486                     found += stream.next();
487                 }
488             }
489             return false;
490         },
491
492         /**
493             @returns {Boolean} Was the token found?
494          */
495         read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
496             var last;
497             if (
498                 stream.look() == "/"
499                 && 
500                 (
501                     
502                     (
503                         !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
504                         || 
505                         (
506                                !last.is("NUMB")
507                             && !last.is("NAME")
508                             && !last.is("RIGHT_PAREN")
509                             && !last.is("RIGHT_BRACKET")
510                         )
511                     )
512                 )
513             ) {
514                 var regex = stream.next();
515                 
516                 while (!stream.look().eof) {
517                     if (stream.look() == "\\") { // escape sequence
518                         regex += stream.next(2);
519                     }
520                     else if (stream.look() == "/") {
521                         regex += stream.next();
522                         
523                         while (/[gmi]/.test(stream.look())) {
524                             regex += stream.next();
525                         }
526                         
527                         tokens.push(new Token(regex, "REGX", "REGX", this.line));
528                         return true;
529                     }
530                     else {
531                         regex += stream.next();
532                     }
533                 }
534                 // error: unterminated regex
535             }
536             return false;
537         }
538 });