// From gnome.introspection-doc-generator / JSDOC / TokenReader.vala
// (upstream commit 7c458613ee8d29dfe473d71520b77bd37d0adfc7)
//<script type="text/javascript">

//const Token   = imports.Token.Token;
//const Lang    = imports.Lang.Lang;

/**
 * Search a {@link JSDOC.TextStream} for language tokens.
 */
namespace JSDOC {

    /**
     * Raised by {@link TokenReader} when it finds constructs that may
     * break downstream compression (unless ignoreBadGrammer is set).
     */
    public errordomain TokenReaderError {
        ARGUMENT_ERROR
    }

    /**
     * A growable list of {@link Token} objects with lexer-specific
     * convenience lookups (last raw token, last non-whitespace token).
     */
    public class TokenArray : Object {

        Gee.ArrayList<Token> tokens;

        /** Number of tokens collected so far. */
        public int size {
            get { return this.tokens.size; }
        }

        public TokenArray ()
        {
            // BUG FIX: the original assigned to a non-existent `this.items`.
            this.tokens = new Gee.ArrayList<Token> ();
        }

        /**
         * Indexed access (also enables the `array[i]` syntax).
         * `new` hides GLib.Object's property-varargs get().
         */
        public new Token get (int i) {
            return this.tokens.get (i);
        }

        /**
         * @return the most recently pushed token, or null when empty.
         */
        public Token? last () {
            // BUG FIX: the original compared the list itself to 0 and used a
            // non-existent `.length` property; Gee.ArrayList exposes `.size`.
            if (this.tokens.size > 0) {
                return this.tokens.get (this.tokens.size - 1);
            }
            return null;
        }

        /**
         * @return the last token that is neither whitespace nor a comment,
         *         or null if there is none.
         */
        public Token? lastSym () {
            for (var i = this.tokens.size - 1; i >= 0; i--) {
                var t = this.tokens.get (i);
                // NOTE(review): inlined the JS Token.is(x) check, which
                // matched either the token's name or its type — confirm
                // against Token.vala.
                if (!(t.type == "WHIT" || t.name == "WHIT"
                      || t.type == "COMM" || t.name == "COMM")) {
                    return t;
                }
            }
            return null;
        }

        /** Append a token. */
        public void push (Token t) {
            this.tokens.add (t);
        }

        /**
         * Remove and return the last token, or null when empty.
         * (Needed by TokenReader.read_newline to collapse whitespace runs.)
         */
        public Token? pop () {
            if (this.tokens.size == 0) {
                return null;
            }
            return this.tokens.remove_at (this.tokens.size - 1);
        }
    }


    /**
     * Searches a {@link TextStream} for language tokens.
     *
     * NOTE(review): converted from the JavaScript original; this class
     * assumes the following project APIs — confirm against
     * TextStream.vala / Lang.vala / Token.vala:
     *
     *  - string TextStream.look (int offset, out bool eof)
     *  - string TextStream.next (int n = 1)
     *  - bool Lang.isWordChar / isSpace / isNewline / isNumber / isHexDec (string)
     *  - string? Lang.keyword (string) / Lang.punc (string)  (null = no match)
     *  - Token exposes public string fields: data, type, name
     */
    public class TokenReader : Object
    {
        /** merge multiple whitespace runs into a single token */
        public bool collapseWhite = false;
        /** keep JSDOC comments */
        public bool keepDocs = true;
        /** keep white space */
        public bool keepWhite = false;
        /** keep all comments */
        public bool keepComments = false;
        /** separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c']) */
        public bool sepIdents = false;
        /** name of file being parsed (used in error messages) */
        public string filename = "";
        /** do not throw errors if we find stuff that might break compression */
        public bool ignoreBadGrammer = false;

        // current (1-based) line number while tokenizing
        int line = 0;

        // Inlined equivalent of the JS Token.is(): matches either the
        // token's name or its type.  NOTE(review): confirm vs Token.vala.
        static bool token_is (Token t, string what) {
            return t.name == what || t.type == what;
        }

        /**
         * Tokenize a stream.
         *
         * {{{
         *   var ts = new TextStream (file_contents);
         *   var tr = new TokenReader () { keepComments = true, keepWhite = true };
         *   var tokens = tr.tokenize (ts);
         * }}}
         *
         * @param stream the character stream to scan
         * @return the tokens found
         * @throws TokenReaderError on suspicious grammar unless ignoreBadGrammer is set
         */
        public TokenArray tokenize (TextStream stream) throws TokenReaderError
        {
            this.line = 1;
            var tokens = new TokenArray ();

            bool eof;
            while (true) {
                stream.look (0, out eof);
                if (eof) {
                    break;
                }
                if (this.read_mlcomment (stream, tokens)) continue;
                if (this.read_slcomment (stream, tokens)) continue;
                if (this.read_dbquote (stream, tokens))   continue;
                if (this.read_snquote (stream, tokens))   continue;
                if (this.read_regx (stream, tokens))      continue;
                if (this.read_numb (stream, tokens))      continue;
                if (this.read_punc (stream, tokens))      continue;
                if (this.read_newline (stream, tokens))   continue;
                if (this.read_space (stream, tokens))     continue;
                if (this.read_word (stream, tokens))      continue;

                // Nothing matched: consume one character as an explicit
                // UNKNOWN_TOKEN instead of looping forever.
                tokens.push (new Token (stream.next (), "TOKN", "UNKNOWN_TOKEN", this.line));
            }

            return tokens;
        }

        /**
         * findPuncToken - find the index of a token (previous to current),
         * skipping over balanced ()/{} pairs.  Used to back-check syntax.
         *
         * @param tokens the tokens collected so far
         * @param data   token text to look for (eg. "(")
         * @param n      index to start reading from; values < 1 mean
         *               "start at the end" (mirrors the JS `n || len - 1`)
         * @return position of the token, or -1 if not found
         */
        public int findPuncToken (TokenArray tokens, string data, int n = 0)
        {
            n = n > 0 ? n : tokens.size - 1;
            var stack = 0;
            while (n > -1) {

                if (stack == 0 && tokens.get (n).data == data) {
                    return n;
                }

                if (tokens.get (n).data == ")" || tokens.get (n).data == "}") {
                    stack++;
                    n--;
                    continue;
                }
                if (stack > 0 && (tokens.get (n).data == "{" || tokens.get (n).data == "(")) {
                    stack--;
                    n--;
                    continue;
                }

                n--;
            }
            return -1;
        }

        /**
         * lastSym - find the last non-whitespace, non-comment token
         * before index `n`.
         *
         * @param tokens the tokens collected so far
         * @param n      index to start scanning back from (exclusive)
         * @return the token, or null if none found
         */
        public Token? lastSym (TokenArray tokens, int n)
        {
            for (var i = n - 1; i >= 0; i--) {
                var t = tokens.get (i);
                if (!(token_is (t, "WHIT") || token_is (t, "COMM"))) {
                    return t;
                }
            }
            return null;
        }

        /**
         * Read a keyword or identifier.
         * @return was the token found?
         */
        public bool read_word (TextStream stream, TokenArray tokens) throws TokenReaderError
        {
            string found = "";
            bool eof;
            while (true) {
                var look = stream.look (0, out eof);
                if (eof || !Lang.isWordChar (look)) {
                    break;
                }
                found += stream.next ();
            }

            if (found == "") {
                return false;
            }

            var name = Lang.keyword (found);
            if (name != null) {
                var last = tokens.lastSym ();
                if (found == "return" && last != null && last.data == ")") {
                    // A `return` directly after ")" is only legal when the
                    // parens belong to an if/while condition; walk back over
                    // the balanced parens and inspect what introduced them.
                    var n = this.findPuncToken (tokens, ")");
                    n = this.findPuncToken (tokens, "(", n - 1);

                    var lt = this.lastSym (tokens, n);
                    // BUG FIX: the JS original tested `indexOf(...) < -1`,
                    // which can never be true; the evident intent (allow
                    // IF/WHILE keywords) is implemented here.
                    if (lt != null
                        && (lt.type != "KEYW" || (lt.name != "IF" && lt.name != "WHILE"))) {
                        if (!this.ignoreBadGrammer) {
                            throw new TokenReaderError.ARGUMENT_ERROR (
                                "\n%s:%d Error - return found after )",
                                this.filename, this.line);
                        }
                    }
                }

                tokens.push (new Token (found, "KEYW", name, this.line));
                return true;
            }

            if (!this.sepIdents || !("." in found)) {
                tokens.push (new Token (found, "NAME", "NAME", this.line));
                return true;
            }

            // Split dotted identifiers: a.b.c -> NAME DOT NAME DOT NAME
            var parts = found.split (".");
            var first = true;
            foreach (var nm in parts) {
                if (!first) {
                    tokens.push (new Token (".", "PUNC", "DOT", this.line));
                }
                first = false;
                tokens.push (new Token (nm, "NAME", "NAME", this.line));
            }
            return true;
        }

        /**
         * Read a punctuation token (longest match wins).
         * @return was the token found?
         */
        public bool read_punc (TextStream stream, TokenArray tokens) throws TokenReaderError
        {
            string found = "";
            bool eof;
            while (true) {
                var look = stream.look (0, out eof);
                if (eof || Lang.punc (found + look) == null) {
                    break;
                }
                found += stream.next ();
            }

            if (found == "") {
                return false;
            }

            var last = tokens.lastSym ();
            if ((found == "}" || found == "]") && last != null && last.data == ",") {
                // Trailing commas break older JS engines / compressors.
                if (this.ignoreBadGrammer) {
                    print ("\n%s:%d Error - comma found before %s",
                           this.filename, this.line, found);
                } else {
                    throw new TokenReaderError.ARGUMENT_ERROR (
                        "\n%s:%d Error - comma found before %s",
                        this.filename, this.line, found);
                }
            }

            tokens.push (new Token (found, "PUNC", Lang.punc (found), this.line));
            return true;
        }

        /**
         * Read a run of non-newline whitespace.
         * @return was the token found?
         */
        public bool read_space (TextStream stream, TokenArray tokens)
        {
            string found = "";
            bool eof;
            while (true) {
                var look = stream.look (0, out eof);
                if (eof || !Lang.isSpace (look) || Lang.isNewline (look)) {
                    break;
                }
                found += stream.next ();
            }

            if (found == "") {
                return false;
            }
            if (this.collapseWhite) {
                found = " ";
            }
            if (this.keepWhite) {
                tokens.push (new Token (found, "WHIT", "SPACE", this.line));
            }
            return true;
        }

        /**
         * Read a run of newlines (advances the line counter).
         * @return was the token found?
         */
        public bool read_newline (TextStream stream, TokenArray tokens)
        {
            string found = "";
            var line = this.line;           // report the run's starting line
            bool eof;
            while (true) {
                var look = stream.look (0, out eof);
                if (eof || !Lang.isNewline (look)) {
                    break;
                }
                this.line++;
                found += stream.next ();
            }

            if (found == "") {
                return false;
            }
            if (this.collapseWhite) {
                found = "\n";
            }
            if (this.keepWhite) {
                // Collapse a whitespace token directly before the newline.
                var last = tokens.pop ();
                if (last != null && last.name != "WHIT") {
                    tokens.push (last);
                }
                tokens.push (new Token (found, "WHIT", "NEWLINE", line));
            }
            return true;
        }

        /**
         * Read a multi-line comment; emits a JSDOC token for doclets when
         * keepDocs is set, or MULTI_LINE_COMM when keepComments is set.
         * @return was the token found?
         */
        public bool read_mlcomment (TextStream stream, TokenArray tokens)
        {
            bool eof;
            if (stream.look (0, out eof) != "/" || eof) {
                return false;
            }
            if (stream.look (1, out eof) != "*" || eof) {
                return false;
            }

            var found = stream.next (2);
            var line = this.line;           // comment's starting line
            while (true) {
                stream.look (0, out eof);
                if (eof) {
                    break;
                }
                // Stop once the last two consumed characters are "*/".
                bool e1, e2;
                if (stream.look (-1, out e1) == "/" && stream.look (-2, out e2) == "*") {
                    break;
                }
                var c = stream.next ();
                if (c == "\n") {
                    this.line++;
                }
                found += c;
            }

            // A doclet starts with "/**" not immediately followed by "/"
            // (so "/**/" is a plain comment).  NOTE(review): the JS regex
            // also accepted "/****" despite its comment claiming otherwise;
            // that behaviour is preserved here.
            // BUG FIX: the doclet token now records the comment's starting
            // line, consistent with MULTI_LINE_COMM.
            if (this.keepDocs && found.length > 3
                && found.has_prefix ("/**") && found[3] != '/') {
                tokens.push (new Token (found, "COMM", "JSDOC", line));
            } else if (this.keepComments) {
                tokens.push (new Token (found, "COMM", "MULTI_LINE_COMM", line));
            }
            return true;
        }

        /**
         * Read a single-line comment: "//..." or an HTML-style "<!--..."
         * opener, up to and including the newline.
         * @return was the token found?
         */
        public bool read_slcomment (TextStream stream, TokenArray tokens)
        {
            bool eof;
            string found;
            var c0 = stream.look (0, out eof);
            if (!eof && c0 == "/" && stream.look (1, out eof) == "/") {
                found = stream.next (2);
            } else if (!eof && c0 == "<" && stream.look (1, out eof) == "!"
                       && stream.look (2, out eof) == "-"
                       && stream.look (3, out eof) == "-") {
                found = stream.next (4);
            } else {
                return false;
            }

            var line = this.line;
            while (true) {
                var look = stream.look (0, out eof);
                if (eof || Lang.isNewline (look)) {
                    break;
                }
                found += stream.next ();
            }
            stream.look (0, out eof);
            if (!eof) {
                found += stream.next ();    // consume the terminating newline
            }
            if (this.keepComments) {
                tokens.push (new Token (found, "COMM", "SINGLE_LINE_COMM", line));
            }
            this.line++;
            return true;
        }

        /**
         * Read a double-quoted string literal (handles escape sequences and
         * backslash-newline line continuations).
         * @return was the token found?
         */
        public bool read_dbquote (TextStream stream, TokenArray tokens)
        {
            bool eof;
            if (stream.look (0, out eof) != "\"" || eof) {
                return false;
            }
            var str = stream.next ();

            while (true) {
                var look = stream.look (0, out eof);
                if (eof) {
                    break;
                }
                if (look == "\\") {
                    if (Lang.isNewline (stream.look (1, out eof))) {
                        // Collapse an escaped line break (possibly CRLF)
                        // into a canonical "\\\n".
                        string nl;
                        do {
                            stream.next ();
                            nl = stream.look (0, out eof);
                        } while (!eof && Lang.isNewline (nl));
                        str += "\\\n";
                    } else {
                        str += stream.next (2);
                    }
                } else if (look == "\"") {
                    str += stream.next ();
                    tokens.push (new Token (str, "STRN", "DOUBLE_QUOTE", this.line));
                    return true;
                } else {
                    str += stream.next ();
                }
            }
            return false; // error! unterminated string
        }

        /**
         * Read a single-quoted string literal (handles escape sequences).
         * @return was the token found?
         */
        public bool read_snquote (TextStream stream, TokenArray tokens)
        {
            bool eof;
            if (stream.look (0, out eof) != "'" || eof) {
                return false;
            }
            var str = stream.next ();

            while (true) {
                var look = stream.look (0, out eof);
                if (eof) {
                    break;
                }
                if (look == "\\") {         // escape sequence
                    str += stream.next (2);
                } else if (look == "'") {
                    str += stream.next ();
                    tokens.push (new Token (str, "STRN", "SINGLE_QUOTE", this.line));
                    return true;
                } else {
                    str += stream.next ();
                }
            }
            return false; // error! unterminated string
        }

        /**
         * Read a numeric literal (decimal, octal, or hex via read_hex).
         * @return was the token found?
         */
        public bool read_numb (TextStream stream, TokenArray tokens)
        {
            bool eof;
            if (stream.look (0, out eof) == "0" && stream.look (1, out eof) == "x") {
                return this.read_hex (stream, tokens);
            }

            string found = "";
            while (true) {
                var look = stream.look (0, out eof);
                if (eof || !Lang.isNumber (found + look)) {
                    break;
                }
                found += stream.next ();
            }

            if (found == "") {
                return false;
            }
            // A leading 0 followed by an octal digit marks an octal literal
            // (same test as the original /^0[0-7]/ regex).
            if (found.length > 1 && found[0] == '0'
                && found[1] >= '0' && found[1] <= '7') {
                tokens.push (new Token (found, "NUMB", "OCTAL", this.line));
            } else {
                tokens.push (new Token (found, "NUMB", "DECIMAL", this.line));
            }
            return true;
        }
        /*t:
            requires("../lib/JSDOC/TextStream.js");
            requires("../lib/JSDOC/Token.js");
            requires("../lib/JSDOC/Lang.js");

            plan(3, "testing read_numb");

            //// setup
            var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
            var tr = new TokenReader();
            var tokens = tr.tokenize(new TextStream(src));

            var hexToken, octToken, decToken;
            for (var i = 0; i < tokens.length; i++) {
                if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
                if (tokens[i].name == "OCTAL") octToken = tokens[i];
                if (tokens[i].name == "DECIMAL") decToken = tokens[i];
            }
            ////

            is(decToken.data, "8.0", "decimal number is found in source.");
            is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
            is(octToken.data, "0777", "octal number is found in source.");
        */

        /**
         * Read a hexadecimal literal (called by read_numb after "0x").
         * @return was the token found?
         */
        public bool read_hex (TextStream stream, TokenArray tokens)
        {
            bool eof;
            var found = stream.next (2);

            while (true) {
                var look = stream.look (0, out eof);
                if (eof) {
                    break;
                }
                if (Lang.isHexDec (found) && !Lang.isHexDec (found + look)) { // done
                    tokens.push (new Token (found, "NUMB", "HEX_DEC", this.line));
                    return true;
                }
                found += stream.next ();
            }
            return false;
        }

        /**
         * Read a regular-expression literal.  A leading "/" is only a regex
         * when the previous symbol cannot end an expression (otherwise it
         * is a division operator).
         * @return was the token found?
         */
        public bool read_regx (TextStream stream, TokenArray tokens)
        {
            bool eof;
            if (stream.look (0, out eof) != "/" || eof) {
                return false;
            }
            var last = tokens.lastSym ();
            // if there is no last symbol, the regex is the first symbol
            if (last != null
                && (token_is (last, "NUMB")
                    || token_is (last, "NAME")
                    || token_is (last, "RIGHT_PAREN")
                    || token_is (last, "RIGHT_BRACKET"))) {
                return false;
            }

            var regex = stream.next ();

            while (true) {
                var look = stream.look (0, out eof);
                if (eof) {
                    break;
                }
                if (look == "\\") {         // escape sequence
                    regex += stream.next (2);
                } else if (look == "/") {
                    regex += stream.next ();

                    // trailing flags: g, m, i
                    while (true) {
                        look = stream.look (0, out eof);
                        if (eof || (look != "g" && look != "m" && look != "i")) {
                            break;
                        }
                        regex += stream.next ();
                    }

                    tokens.push (new Token (regex, "REGX", "REGX", this.line));
                    return true;
                } else {
                    regex += stream.next ();
                }
            }
            // error: unterminated regex
            return false;
        }
    }
}