// JSDOC/TokenReader.vala
// [gnome.introspection-doc-generator] / JSDOC / TokenReader.vala
1 //<script type="text/javascript">
2
3  
4
5
6 //const Token   = imports.Token.Token;
7 //const Lang    = imports.Lang.Lang;
8
9 /**
10         @class Search a {@link JSDOC.TextStream} for language tokens.
11 */
12
13 namespace JSDOC {
14
15     public class TokenArray: Object {
16         
17         public Gee.ArrayList<Token> tokens;
18         public int length {
19             get { return this.tokens.size }
20         }
21         
22         public TokenArray()
23         {
24             this.items = new Gee.ArrayList<Token>();
25         }
26         
27         public Token? last() {
28             if (this.tokens > 0) {
29                 return this.tokens[this.tokens.length-1];
30             }
31             return null;
32         }
33         public Token? lastSym () {
34             for (var i = this.tokens.length-1; i >= 0; i--) {
35                 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
36                     return this.tokens.get(i);
37                 }
38             }
39             return null;
40         }
41         public void push (Token t) {
42             this.tokens.add(t);
43         }
44         public Token get(int i) {
45             return this.tokens.get(i);
46         }
47     }
48
49
50     public class TokenReader : Object
51     {
52         
53         
54         
55         /*
56          *
57          * I wonder if this will accept the prop: value, prop2 :value construxtor if we do not define one...
58          */
59         
60         /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
61         public bool collapseWhite = false, // only reduces white space...
62         /** @cfg {Boolean} keepDocs keep JSDOC comments **/
63         public bool keepDocs = true,
64         /** @cfg {Boolean} keepWhite keep White space **/
65         public bool keepWhite = false,
66         /** @cfg {Boolean} keepComments  keep all comments **/
67         public bool keepComments = false,
68         /** @cfg {Boolean} sepIdents seperate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
69         public bool sepIdents = false,
70         /** @cfg {String} filename name of file being parsed. **/
71         public string filename = "";
72         /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
73         public bool ignoreBadGrammer = false,
74         
75         
76         int line = 0;
77         
78         /**
79          * tokenize a stream
80          * @return {Array} of tokens
81          * 
82          * ts = new TextStream(File.read(str));
83          * tr = TokenReader({ keepComments : true, keepWhite : true });
84          * tr.tokenize(ts)
85          * 
86          */
87         public TokenArray tokenize(TextStream stream)
88         {
89             this.line =1;
90             var tokens = new TokenArray();
91            
92             bool eof;
93             while (true) {
94                 
95                 stream.look(0, out eof) 
96                 if (eof) {
97                     break;
98                 }
99                 if (this.read_mlcomment(stream, tokens)) continue;
100                 if (this.read_slcomment(stream, tokens)) continue;
101                 if (this.read_dbquote(stream, tokens))   continue;
102                 if (this.read_snquote(stream, tokens))   continue;
103                 if (this.read_regx(stream, tokens))      continue;
104                 if (this.read_numb(stream, tokens))      continue;
105                 if (this.read_punc(stream, tokens))      continue;
106                 if (this.read_newline(stream, tokens))   continue;
107                 if (this.read_space(stream, tokens))     continue;
108                 if (this.read_word(stream, tokens))      continue;
109                 
110                 // if execution reaches here then an error has happened
111                 tokens.push(
112                         new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
113                 );
114             }
115             
116             
117             
118             return tokens;
119         },
120
121         /**
122          * findPuncToken - find the id of a token (previous to current)
123          * need to back check syntax..
124          * 
125          * @arg {Array} tokens the array of tokens.
126          * @arg {String} token data (eg. '(')
127          * @arg {Number} offset where to start reading from
128          * @return {Number} position of token
129          */
130         public int findPuncToken(TokenArray tokens, string data, int n) {
131             n = n || tokens.length -1;
132             var stack = 0;
133             while (n > -1) {
134                 
135                 if (!stack && tokens.get(n).data == data) {
136                     return n;
137                 }
138                 
139                 if (tokens.get(n).data  == ')' || tokens.get(n).data  == '}') {
140                     stack++;
141                     n--;
142                     continue;
143                 }
144                 if (stack && (tokens.get(n).data  == '{' || tokens.get(n).data  == '(')) {
145                     stack--;
146                     n--;
147                     continue;
148                 }
149                 
150                 
151                 n--;
152             }
153             return -1;
154         },
155         /**
156          * lastSym - find the last token symbol
157          * need to back check syntax..
158          * 
159          * @arg {Array} tokens the array of tokens.
160          * @arg {Number} offset where to start..
161          * @return {Token} the token
162          */
163         public Token lastSym(TokenArray tokens, int n) {
164             for (var i = n-1; i >= 0; i--) {
165                 if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
166                     return tokens.get(i);
167                 }
168             }
169             return null;
170         },
171         
172          
173         
174         /**
175             @returns {Boolean} Was the token found?
176          */
177         read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
178             var found = "";
179             while (!stream.look().eof && Lang.isWordChar(stream.look())) {
180                 found += stream.next();
181             }
182             
183             if (found === "") {
184                 return false;
185             }
186             
187             var name;
188             if ((name = Lang.keyword(found))) {
189                 if (found == 'return' && tokens.lastSym().data == ')') {
190                     //Seed.print('@' + tokens.length);
191                     var n = this.findPuncToken(tokens, ')');
192                     //Seed.print(')@' + n);
193                     n = this.findPuncToken(tokens, '(', n-1);
194                     //Seed.print('(@' + n);
195                     
196                     var lt = this.lastSym(tokens, n);
197                     print(JSON.stringify(lt));
198                     if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
199                         if (!this.ignoreBadGrammer) {
200                             throw {
201                                 name : "ArgumentError", 
202                                 message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
203                             }
204                         }
205                     }
206                     
207                     
208                     
209                 }
210                 
211                 tokens.push(new Token(found, "KEYW", name, this.line));
212                 return true;
213             }
214             if (!this.sepIdents || found.indexOf('.') < 0 ) {
215                 tokens.push(new Token(found, "NAME", "NAME", this.line));
216                 return true;
217             }
218             var n = found.split('.');
219             var p = false;
220             var _this = this;
221             n.forEach(function(nm) {
222                 if (p) {
223                     tokens.push(new Token('.', "PUNC", "DOT", _this.line));
224                 }
225                 p=true;
226                 tokens.push(new Token(nm, "NAME", "NAME", _this.line));
227             });
228             return true;
229                 
230
231         },
232
        /**
            Read a punctuation token (longest match wins), with a grammar check
            for a trailing comma before '}' or ']'.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = "";
            var name;
            // Greedily extend while the accumulated text is still a known
            // punctuation sequence (so "==" beats "=", etc.).
            while (!stream.look().eof && Lang.punc(found+stream.look())) {
                found += stream.next();
            }
            
            
            if (found === "") {
                return false;
            }
            
            // "foo, }" / "foo, ]" breaks some JS engines/compressors: warn or throw.
            if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
                //print("Error - comma found before " + found);
                //print(JSON.stringify(tokens.lastSym(), null,4));
                if (this.ignoreBadGrammer) {
                    print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
                } else {
                    
                    throw {
                        name : "ArgumentError", 
                        message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
                    }
                }
            }
            
            // Lang.punc(found) maps the literal text to the token name (e.g. DOT).
            tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
            return true;
            
        },
266
        /**
            Read a run of non-newline whitespace; honours collapseWhite/keepWhite.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = "";
            
            // Newlines are handled separately by read_newline (they bump this.line).
            while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            //print("WHITE = " + JSON.stringify(found)); 
            // Whitespace is consumed even when not kept; token only emitted if keepWhite.
            if (this.collapseWhite) found = " ";
            if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
            return true;
        
        },
286
        /**
            Read a run of newline characters, updating the line counter.
            NOTE(review): unported JavaScript from the original JSDOC source;
            it calls tokens.pop(), which the Vala TokenArray above does not
            provide — the port will need to add it (or an equivalent).
            @returns {Boolean} Was the token found?
         */
        read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = "";
            var line = this.line;      // token is tagged with the line it started on
            while (!stream.look().eof && Lang.isNewline(stream.look())) {
                this.line++;
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            //this.line++;
            if (this.collapseWhite) {
                found = "\n";
            }
             if (this.keepWhite) {
                // Drop an immediately preceding whitespace token so NEWLINE
                // replaces it rather than stacking WHIT tokens.
                // NOTE(review): "last.name != WHIT" — WHIT is elsewhere used as the
                // token *type* (names are SPACE/NEWLINE); possibly meant last.type.
                var last = tokens ? tokens.pop() : false;
                if (last && last.name != "WHIT") {
                    tokens.push(last);
                }
                
                tokens.push(new Token(found, "WHIT", "NEWLINE", line));
            }
            return true;
        },
315
        /**
            Read a multi-line comment; classifies it as JSDOC (doclet) or plain.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() == "/" && stream.look(1) == "*") {
                var found = stream.next(2);
                var c = '';
                var line = this.line;   // comment token is tagged with its first line
                // look(-1)/look(-2) peek BACK at already-consumed characters:
                // loop until the last two consumed chars are "*" then "/".
                while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
                    c = stream.next();
                    if (c == "\n") this.line++;
                    found += c;
                }
                
                // to start doclet we allow /** or /*** but not /**/ or /****
                if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
                else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
                return true;
            }
            return false;
        },
337
        /**
            Read a single-line comment: either "//..." or an HTML-style "<!--..."
            opener, consumed up to and including the newline.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found;
            if (
                (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
                || 
                (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
            ) {
                var line = this.line;
                while (!stream.look().eof && !Lang.isNewline(stream.look())) {
                    found += stream.next();
                }
                // Also swallow the terminating newline (if not at EOF).
                if (!stream.look().eof) {
                    found += stream.next();
                }
                if (this.keepComments) {
                    tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
                }
                this.line++;
                return true;
            }
            return false;
        },
363
        /**
            Read a double-quoted string literal, honouring backslash escapes and
            escaped line continuations.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() == "\"") {
                // find terminator
                var string = stream.next();
                
                while (!stream.look().eof) {
                    if (stream.look() == "\\") {
                        if (Lang.isNewline(stream.look(1))) {
                            // Backslash-newline continuation: swallow the newline
                            // run and normalise it to a single "\\\n".
                            do {
                                stream.next();
                            } while (!stream.look().eof && Lang.isNewline(stream.look()));
                            string += "\\\n";
                        }
                        else {
                            // Ordinary escape: take the backslash and escaped char.
                            string += stream.next(2);
                        }
                    }
                    else if (stream.look() == "\"") {
                        string += stream.next();
                        tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
                        return true;
                    }
                    else {
                        string += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        },
396
        /**
            Read a single-quoted string literal, honouring backslash escapes
            (no line-continuation handling, unlike read_dbquote).
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() == "'") {
                // find terminator
                var string = stream.next();
                
                while (!stream.look().eof) {
                    if (stream.look() == "\\") { // escape sequence
                        string += stream.next(2);
                    }
                    else if (stream.look() == "'") {
                        string += stream.next();
                        tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
                        return true;
                    }
                    else {
                        string += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        },
421
        /**
            Read a numeric literal: delegates "0x..." to read_hex, otherwise
            greedily consumes while the text still parses as a number, then
            classifies it as OCTAL (leading 0 + octal digit) or DECIMAL.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() === "0" && stream.look(1) == "x") {
                return this.read_hex(stream, tokens);
            }
            
            var found = "";
            
            while (!stream.look().eof && Lang.isNumber(found+stream.look())){
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            else {
                if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
                else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
                return true;
            }
        },
445         /*t:
446             requires("../lib/JSDOC/TextStream.js");
447             requires("../lib/JSDOC/Token.js");
448             requires("../lib/JSDOC/Lang.js");
449             
450             plan(3, "testing read_numb");
451             
452             //// setup
453             var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
454             var tr = new TokenReader();
455             var tokens = tr.tokenize(new TextStream(src));
456             
457             var hexToken, octToken, decToken;
458             for (var i = 0; i < tokens.length; i++) {
459                 if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
460                 if (tokens[i].name == "OCTAL") octToken = tokens[i];
461                 if (tokens[i].name == "DECIMAL") decToken = tokens[i];
462             }
463             ////
464             
465             is(decToken.data, "8.0", "decimal number is found in source.");
466             is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
467             is(octToken.data, "0777", "octal number is found in source.");
468         */
469
        /**
            Read a hexadecimal literal; called by read_numb once "0x" is seen.
            Consumes digits until adding the next char would no longer form a
            valid hex number.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = stream.next(2);   // consume the "0x" prefix
            
            while (!stream.look().eof) {
                if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
                    tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
                    return true;
                }
                else {
                    found += stream.next();
                }
            }
            return false;
        },
487
        /**
            Read a regex literal. A leading "/" is only treated as a regex when
            the previous symbol cannot end an expression (otherwise "/" would be
            division); then consumes through the closing "/" plus any g/m/i flags.
            NOTE(review): unported JavaScript from the original JSDOC source.
            @returns {Boolean} Was the token found?
         */
        read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
            var last;
            if (
                stream.look() == "/"
                && 
                (
                    
                    (
                        !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
                        || 
                        (
                               !last.is("NUMB")
                            && !last.is("NAME")
                            && !last.is("RIGHT_PAREN")
                            && !last.is("RIGHT_BRACKET")
                        )
                    )
                )
            ) {
                var regex = stream.next();
                
                while (!stream.look().eof) {
                    if (stream.look() == "\\") { // escape sequence
                        regex += stream.next(2);
                    }
                    else if (stream.look() == "/") {
                        regex += stream.next();
                        
                        // Trailing flags (global / multiline / ignore-case).
                        while (/[gmi]/.test(stream.look())) {
                            regex += stream.next();
                        }
                        
                        tokens.push(new Token(regex, "REGX", "REGX", this.line));
                        return true;
                    }
                    else {
                        regex += stream.next();
                    }
                }
                // error: unterminated regex
            }
            return false;
        }
534 });