// JSDOC/TokenReader.vala
// [gnome.introspection-doc-generator] / JSDOC / TokenReader.vala
//<script type="text/javascript">

//const Token   = imports.Token.Token;
//const Lang    = imports.Lang.Lang;

/**
    @class Search a {@link JSDOC.TextStream} for language tokens.
*/
13 namespace JSDOC {
14
15     public class TokenArray: Object {
16         
17         public Gee.ArrayList<Token> tokens;
18         public int length {
19             get { return this.tokens.size }
20         }
21         
22         public TokenArray()
23         {
24             this.items = new Gee.ArrayList<Token>();
25         }
26         
27         public Token? last() {
28             if (this.tokens > 0) {
29                 return this.tokens[this.tokens.length-1];
30             }
31             return null;
32         }
33         public Token? lastSym () {
34             for (var i = this.tokens.length-1; i >= 0; i--) {
35                 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
36                     return this.tokens.get(i);
37                 }
38             }
39             return null;
40         }
41         public void push (Token t) {
42             this.tokens.add(t);
43         }
44         public Token get(int i) {
45             return this.tokens.get(i);
46         }
47     }
48
49
50     public class TokenReader : Object
51     {
52         
53         
54         
55         /*
56          *
57          * I wonder if this will accept the prop: value, prop2 :value construxtor if we do not define one...
58          */
59         
60         /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
61         public bool collapseWhite = false, // only reduces white space...
62         /** @cfg {Boolean} keepDocs keep JSDOC comments **/
63         public bool keepDocs = true,
64         /** @cfg {Boolean} keepWhite keep White space **/
65         public bool keepWhite = false,
66         /** @cfg {Boolean} keepComments  keep all comments **/
67         public bool keepComments = false,
68         /** @cfg {Boolean} sepIdents seperate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
69         public bool sepIdents = false,
70         /** @cfg {String} filename name of file being parsed. **/
71         public string filename = "";
72         /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
73         public bool ignoreBadGrammer = false,
74         
75         
76         int line = 0;
77         
78         /**
79          * tokenize a stream
80          * @return {Array} of tokens
81          * 
82          * ts = new TextStream(File.read(str));
83          * tr = TokenReader({ keepComments : true, keepWhite : true });
84          * tr.tokenize(ts)
85          * 
86          */
87         public TokenArray tokenize(TextStream stream)
88         {
89             this.line =1;
90             var tokens = new TokenArray();
91            
92             bool eof;
93             while (!stream.lookEOF()) {
94                 
95                 
96                 if (this.read_mlcomment(stream, tokens)) continue;
97                 if (this.read_slcomment(stream, tokens)) continue;
98                 if (this.read_dbquote(stream, tokens))   continue;
99                 if (this.read_snquote(stream, tokens))   continue;
100                 if (this.read_regx(stream, tokens))      continue;
101                 if (this.read_numb(stream, tokens))      continue;
102                 if (this.read_punc(stream, tokens))      continue;
103                 if (this.read_newline(stream, tokens))   continue;
104                 if (this.read_space(stream, tokens))     continue;
105                 if (this.read_word(stream, tokens))      continue;
106                 
107                 // if execution reaches here then an error has happened
108                 tokens.push(
109                         new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
110                 );
111             }
112             
113             
114             
115             return tokens;
116         },
117
118         /**
119          * findPuncToken - find the id of a token (previous to current)
120          * need to back check syntax..
121          * 
122          * @arg {Array} tokens the array of tokens.
123          * @arg {String} token data (eg. '(')
124          * @arg {Number} offset where to start reading from
125          * @return {Number} position of token
126          */
127         public int findPuncToken(TokenArray tokens, string data, int n) {
128             n = n || tokens.length -1;
129             var stack = 0;
130             while (n > -1) {
131                 
132                 if (!stack && tokens.get(n).data == data) {
133                     return n;
134                 }
135                 
136                 if (tokens.get(n).data  == ')' || tokens.get(n).data  == '}') {
137                     stack++;
138                     n--;
139                     continue;
140                 }
141                 if (stack && (tokens.get(n).data  == '{' || tokens.get(n).data  == '(')) {
142                     stack--;
143                     n--;
144                     continue;
145                 }
146                 
147                 
148                 n--;
149             }
150             return -1;
151         },
152         /**
153          * lastSym - find the last token symbol
154          * need to back check syntax..
155          * 
156          * @arg {Array} tokens the array of tokens.
157          * @arg {Number} offset where to start..
158          * @return {Token} the token
159          */
160         public Token lastSym(TokenArray tokens, int n) {
161             for (var i = n-1; i >= 0; i--) {
162                 if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
163                     return tokens.get(i);
164                 }
165             }
166             return null;
167         },
168         
169          
170         
171         /**
172             @returns {Boolean} Was the token found?
173          */
174         public bool read_word (TokenStream stream, TokenArray tokens) {
175             var found = "";
176             while (!stream.look().eof && Lang.isWordChar(stream.look())) {
177                 found += stream.next();
178             }
179             
180             if (found === "") {
181                 return false;
182             }
183             
184             var name;
185             if ((name = Lang.keyword(found))) {
186                 if (found == 'return' && tokens.lastSym().data == ')') {
187                     //Seed.print('@' + tokens.length);
188                     var n = this.findPuncToken(tokens, ')');
189                     //Seed.print(')@' + n);
190                     n = this.findPuncToken(tokens, '(', n-1);
191                     //Seed.print('(@' + n);
192                     
193                     var lt = this.lastSym(tokens, n);
194                     print(JSON.stringify(lt));
195                     if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
196                         if (!this.ignoreBadGrammer) {
197                             throw {
198                                 name : "ArgumentError", 
199                                 message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
200                             }
201                         }
202                     }
203                     
204                     
205                     
206                 }
207                 
208                 tokens.push(new Token(found, "KEYW", name, this.line));
209                 return true;
210             }
211             if (!this.sepIdents || found.indexOf('.') < 0 ) {
212                 tokens.push(new Token(found, "NAME", "NAME", this.line));
213                 return true;
214             }
215             var n = found.split('.');
216             var p = false;
217             var _this = this;
218             n.forEach(function(nm) {
219                 if (p) {
220                     tokens.push(new Token('.', "PUNC", "DOT", _this.line));
221                 }
222                 p=true;
223                 tokens.push(new Token(nm, "NAME", "NAME", _this.line));
224             });
225             return true;
226                 
227
228         },
229
230         /**
231             @returns {Boolean} Was the token found?
232          */
233         read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
234             var found = "";
235             var name;
236             while (!stream.look().eof && Lang.punc(found+stream.look())) {
237                 found += stream.next();
238             }
239             
240             
241             if (found === "") {
242                 return false;
243             }
244             
245             if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
246                 //print("Error - comma found before " + found);
247                 //print(JSON.stringify(tokens.lastSym(), null,4));
248                 if (this.ignoreBadGrammer) {
249                     print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
250                 } else {
251                     
252                     throw {
253                         name : "ArgumentError", 
254                         message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
255                     }
256                 }
257             }
258             
259             tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
260             return true;
261             
262         },
263
264         /**
265             @returns {Boolean} Was the token found?
266          */
267         read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
268             var found = "";
269             
270             while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
271                 found += stream.next();
272             }
273             
274             if (found === "") {
275                 return false;
276             }
277             //print("WHITE = " + JSON.stringify(found)); 
278             if (this.collapseWhite) found = " ";
279             if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
280             return true;
281         
282         },
283
284         /**
285             @returns {Boolean} Was the token found?
286          */
287         read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
288             var found = "";
289             var line = this.line;
290             while (!stream.look().eof && Lang.isNewline(stream.look())) {
291                 this.line++;
292                 found += stream.next();
293             }
294             
295             if (found === "") {
296                 return false;
297             }
298             //this.line++;
299             if (this.collapseWhite) {
300                 found = "\n";
301             }
302              if (this.keepWhite) {
303                 var last = tokens ? tokens.pop() : false;
304                 if (last && last.name != "WHIT") {
305                     tokens.push(last);
306                 }
307                 
308                 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
309             }
310             return true;
311         },
312
313         /**
314             @returns {Boolean} Was the token found?
315          */
316         read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
317             if (stream.look() == "/" && stream.look(1) == "*") {
318                 var found = stream.next(2);
319                 var c = '';
320                 var line = this.line;
321                 while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
322                     c = stream.next();
323                     if (c == "\n") this.line++;
324                     found += c;
325                 }
326                 
327                 // to start doclet we allow /** or /*** but not /**/ or /****
328                 if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
329                 else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
330                 return true;
331             }
332             return false;
333         },
334
335         /**
336             @returns {Boolean} Was the token found?
337          */
338         read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
339             var found;
340             if (
341                 (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
342                 || 
343                 (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
344             ) {
345                 var line = this.line;
346                 while (!stream.look().eof && !Lang.isNewline(stream.look())) {
347                     found += stream.next();
348                 }
349                 if (!stream.look().eof) {
350                     found += stream.next();
351                 }
352                 if (this.keepComments) {
353                     tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
354                 }
355                 this.line++;
356                 return true;
357             }
358             return false;
359         },
360
361         /**
362             @returns {Boolean} Was the token found?
363          */
364         read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
365             if (stream.look() == "\"") {
366                 // find terminator
367                 var string = stream.next();
368                 
369                 while (!stream.look().eof) {
370                     if (stream.look() == "\\") {
371                         if (Lang.isNewline(stream.look(1))) {
372                             do {
373                                 stream.next();
374                             } while (!stream.look().eof && Lang.isNewline(stream.look()));
375                             string += "\\\n";
376                         }
377                         else {
378                             string += stream.next(2);
379                         }
380                     }
381                     else if (stream.look() == "\"") {
382                         string += stream.next();
383                         tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
384                         return true;
385                     }
386                     else {
387                         string += stream.next();
388                     }
389                 }
390             }
391             return false; // error! unterminated string
392         },
393
394         /**
395             @returns {Boolean} Was the token found?
396          */
397         read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
398             if (stream.look() == "'") {
399                 // find terminator
400                 var string = stream.next();
401                 
402                 while (!stream.look().eof) {
403                     if (stream.look() == "\\") { // escape sequence
404                         string += stream.next(2);
405                     }
406                     else if (stream.look() == "'") {
407                         string += stream.next();
408                         tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
409                         return true;
410                     }
411                     else {
412                         string += stream.next();
413                     }
414                 }
415             }
416             return false; // error! unterminated string
417         },
418
419         /**
420             @returns {Boolean} Was the token found?
421          */
422         read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
423             if (stream.look() === "0" && stream.look(1) == "x") {
424                 return this.read_hex(stream, tokens);
425             }
426             
427             var found = "";
428             
429             while (!stream.look().eof && Lang.isNumber(found+stream.look())){
430                 found += stream.next();
431             }
432             
433             if (found === "") {
434                 return false;
435             }
436             else {
437                 if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
438                 else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
439                 return true;
440             }
441         },
442         /*t:
443             requires("../lib/JSDOC/TextStream.js");
444             requires("../lib/JSDOC/Token.js");
445             requires("../lib/JSDOC/Lang.js");
446             
447             plan(3, "testing read_numb");
448             
449             //// setup
450             var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
451             var tr = new TokenReader();
452             var tokens = tr.tokenize(new TextStream(src));
453             
454             var hexToken, octToken, decToken;
455             for (var i = 0; i < tokens.length; i++) {
456                 if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
457                 if (tokens[i].name == "OCTAL") octToken = tokens[i];
458                 if (tokens[i].name == "DECIMAL") decToken = tokens[i];
459             }
460             ////
461             
462             is(decToken.data, "8.0", "decimal number is found in source.");
463             is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
464             is(octToken.data, "0777", "octal number is found in source.");
465         */
466
467         /**
468             @returns {Boolean} Was the token found?
469          */
470         read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
471             var found = stream.next(2);
472             
473             while (!stream.look().eof) {
474                 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
475                     tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
476                     return true;
477                 }
478                 else {
479                     found += stream.next();
480                 }
481             }
482             return false;
483         },
484
485         /**
486             @returns {Boolean} Was the token found?
487          */
488         read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
489             var last;
490             if (
491                 stream.look() == "/"
492                 && 
493                 (
494                     
495                     (
496                         !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
497                         || 
498                         (
499                                !last.is("NUMB")
500                             && !last.is("NAME")
501                             && !last.is("RIGHT_PAREN")
502                             && !last.is("RIGHT_BRACKET")
503                         )
504                     )
505                 )
506             ) {
507                 var regex = stream.next();
508                 
509                 while (!stream.look().eof) {
510                     if (stream.look() == "\\") { // escape sequence
511                         regex += stream.next(2);
512                     }
513                     else if (stream.look() == "/") {
514                         regex += stream.next();
515                         
516                         while (/[gmi]/.test(stream.look())) {
517                             regex += stream.next();
518                         }
519                         
520                         tokens.push(new Token(regex, "REGX", "REGX", this.line));
521                         return true;
522                     }
523                     else {
524                         regex += stream.next();
525                     }
526                 }
527                 // error: unterminated regex
528             }
529             return false;
530         }
531 });