//<script type="text/javascript">



//const Token   = imports.Token.Token;
//const Lang    = imports.Lang.Lang;

/**
    @class Search a {@link JSDOC.TextStream} for language tokens.
*/

namespace JSDOC {

    public class TokenArray: Object {
        
        public Gee.ArrayList<Token> tokens;
        public int length {
            get { return this.tokens.size; }
        }
        
        public TokenArray()
        {
            this.tokens = new Gee.ArrayList<Token>();
        }
        
        public Token? last() {
            if (this.tokens.size > 0) {
                return this.tokens.get(this.tokens.size - 1);
            }
            return null;
        }
        public Token? lastSym () {
            for (var i = this.tokens.size - 1; i >= 0; i--) {
                if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
                    return this.tokens.get(i);
                }
            }
            return null;
        }
        public void push (Token t) {
            this.tokens.add(t);
        }
        public Token get(int i) {
            return this.tokens.get(i);
        }
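        // Not part of the original JavaScript TokenArray (which was a plain
        // Array with a native pop()); added here as an assumed helper so
        // read_newline() below can drop a trailing whitespace token before
        // pushing a NEWLINE token.
        public Token? pop () {
            if (this.tokens.size > 0) {
                return this.tokens.remove_at(this.tokens.size - 1);
            }
            return null;
        }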
    }

    errordomain TokenReader_Error {
        ArgumentError
    }
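    // Sketch of the expected calling pattern (assuming tokenize() declares
    // `throws TokenReader_Error` as below; tr = a TokenReader, ts = a TextStream):
    //
    //   TokenArray tokens;
    //   try {
    //       tokens = tr.tokenize(ts);
    //   } catch (TokenReader_Error e) {
    //       warning("%s", e.message);
    //   }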

    
    public class TokenReader : Object
    {
        
        /*
         * I wonder if this will accept the prop: value, prop2: value constructor if we do not define one...
         */
        
        /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
        public bool collapseWhite = false; // only reduces white space...
        /** @cfg {Boolean} keepDocs keep JSDOC comments **/
        public bool keepDocs = true;
        /** @cfg {Boolean} keepWhite keep White space **/
        public bool keepWhite = false;
        /** @cfg {Boolean} keepComments keep all comments **/
        public bool keepComments = false;
        /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
        public bool sepIdents = false;
        /** @cfg {String} filename name of file being parsed. **/
        public string filename = "";
        /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
        public bool ignoreBadGrammer = false;
        
        
        int line = 0;
        
        /**
         * tokenize a stream
         * @return {TokenArray} of tokens
         * 
         * var ts = new TextStream(File.read(str));
         * var tr = new TokenReader();
         * tr.keepComments = true;
         * tr.keepWhite = true;
         * var toks = tr.tokenize(ts);
         * 
         */
        public TokenArray tokenize(TextStream stream) throws TokenReader_Error
        {
            this.line = 1;
            var tokens = new TokenArray();
           
            while (!stream.lookEOF()) {
                
                if (this.read_mlcomment(stream, tokens)) continue;
                if (this.read_slcomment(stream, tokens)) continue;
                if (this.read_dbquote(stream, tokens))   continue;
                if (this.read_snquote(stream, tokens))   continue;
                if (this.read_regx(stream, tokens))      continue;
                if (this.read_numb(stream, tokens))      continue;
                if (this.read_punc(stream, tokens))      continue;
                if (this.read_newline(stream, tokens))   continue;
                if (this.read_space(stream, tokens))     continue;
                if (this.read_word(stream, tokens))      continue;
                
                // if execution reaches here then an error has happened
                tokens.push(
                    new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
                );
            }
            
            return tokens;
        }

        /**
         * findPuncToken - find the id of a token (previous to current)
         * need to back check syntax..
         * 
         * @arg {TokenArray} tokens the array of tokens.
         * @arg {String} data the token data to look for (eg. '(')
         * @arg {Number} n offset to start reading back from (0 = from the end)
         * @return {Number} position of token, or -1 if not found
         */
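        // Example: after scanning "if (foo(1)) return", findPuncToken(tokens, ")")
        // returns the index of the final ")", and findPuncToken(tokens, "(", n - 1)
        // then walks back over the nested "(1)" pair to the matching outer "(".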
        public int findPuncToken(TokenArray tokens, string data, int n = 0)
        {
            n = n > 0 ? n : tokens.length - 1;
            var stack = 0;
            while (n > -1) {
                
                if (stack == 0 && tokens.get(n).data == data) {
                    return n;
                }
                
                if (tokens.get(n).data == ")" || tokens.get(n).data == "}") {
                    stack++;
                    n--;
                    continue;
                }
                if (stack > 0 && (tokens.get(n).data == "{" || tokens.get(n).data == "(")) {
                    stack--;
                    n--;
                    continue;
                }
                
                n--;
            }
            return -1;
        }
        /**
         * lastSym - find the last token symbol (skipping whitespace/comments)
         * need to back check syntax..
         * 
         * @arg {TokenArray} tokens the array of tokens.
         * @arg {Number} n offset to start looking back from.
         * @return {Token} the token, or null if none found
         */
        public Token? lastSym(TokenArray tokens, int n)
        {
            for (var i = n - 1; i >= 0; i--) {
                if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
                    return tokens.get(i);
                }
            }
            return null;
        }
        
        
        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_word (TextStream stream, TokenArray tokens) throws TokenReader_Error
        {
            string found = "";
            while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            
            var name = Lang.keyword(found);
            if (name != null) {
                
                // look for "()return" ?? why ???
                var ls = tokens.lastSym();
                if (found == "return" && ls != null && ls.data == ")") {
                    //Seed.print('@' + tokens.length);
                    var n = this.findPuncToken(tokens, ")");
                    //Seed.print(')@' + n);
                    n = this.findPuncToken(tokens, "(", n - 1);
                    //Seed.print('(@' + n);
                    
                    var lt = this.lastSym(tokens, n);
                    
                    //print(JSON.stringify(lt));
                    if (lt == null || lt.type != "KEYW" || (lt.name != "IF" && lt.name != "WHILE")) {
                        if (!this.ignoreBadGrammer) {
                            throw new TokenReader_Error.ArgumentError(
                                this.filename + ":" + this.line.to_string() + " Error - return found after )"
                            );
                        }
                    }
                    
                }
                
                tokens.push(new Token(found, "KEYW", name, this.line));
                return true;
            }
            
            if (!this.sepIdents || found.index_of(".") < 0) {
                tokens.push(new Token(found, "NAME", "NAME", this.line));
                return true;
            }
            // split "a.b.c" into NAME / DOT / NAME / DOT / NAME tokens
            var parts = found.split(".");
            var p = false;
            foreach (unowned string nm in parts) {
                if (p) {
                    tokens.push(new Token(".", "PUNC", "DOT", this.line));
                }
                p = true;
                tokens.push(new Token(nm, "NAME", "NAME", this.line));
            }
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_punc (TextStream stream, TokenArray tokens) throws TokenReader_Error
        {
            string found = "";
            while (!stream.lookEOF() && Lang.punc(found + stream.look()).length > 0) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            
            var ls = tokens.lastSym();
            
            if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
                //print("Error - comma found before " + found);
                //print(JSON.stringify(tokens.lastSym(), null,4));
                if (this.ignoreBadGrammer) {
                    print("\n" + this.filename + ":" + this.line.to_string() + " Error - comma found before " + found);
                } else {
                    throw new TokenReader_Error.ArgumentError(
                        this.filename + ":" + this.line.to_string() + " comma found before " + found
                    );
                }
            }
            
            tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_space (TextStream stream, TokenArray tokens)
        {
            var found = "";
            
            while (!stream.lookEOF() && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            //print("WHITE = " + JSON.stringify(found));
            
            // if we found a new line, then we could check if previous character was a ';' - if so we can drop it.
            // otherwise generally keep it.. in which case it should reduce our issue with stripping new lines..
            
            if (this.collapseWhite) {
                found = " "; // this might work better if it was a '\n' ???
            }
            if (this.keepWhite) {
                tokens.push(new Token(found, "WHIT", "SPACE", this.line));
            }
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_newline (TextStream stream, TokenArray tokens)
        {
            var found = "";
            var line = this.line;
            while (!stream.lookEOF() && Lang.isNewline(stream.look())) {
                this.line++;
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            //this.line++;
            if (this.collapseWhite) {
                found = "\n";
            }
            if (this.keepWhite) {
                // drop a trailing whitespace token so runs of WHIT/NEWLINE collapse
                var last = tokens.pop();
                if (last != null && last.name != "WHIT") {
                    tokens.push(last);
                }
                
                tokens.push(new Token(found, "WHIT", "NEWLINE", line));
            }
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_mlcomment (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "/" && stream.look(1) == "*") {
                var found = stream.next(2);
                var c = "";
                var line = this.line;
                while (!stream.lookEOF() && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
                    c = stream.next();
                    if (c == "\n") {
                        this.line++;
                    }
                    found += c;
                }
                
                // to start a doclet we allow /** or /*** but not /**/ or /****
                if (this.keepDocs && GLib.Regex.match_simple("^/\\*\\*([^/]|\\*[^*])", found)) {
                    tokens.push(new Token(found, "COMM", "JSDOC", this.line));
                } else if (this.keepComments) {
                    tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
                }
                return true;
            }
            return false;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_slcomment (TextStream stream, TokenArray tokens)
        {
            var found = "";
            if (stream.look() == "/" && stream.look(1) == "/") {
                found = stream.next(2);
            } else if (stream.look() == "<" && stream.look(1) == "!"
                       && stream.look(2) == "-" && stream.look(3) == "-") {
                found = stream.next(4);
            } else {
                return false;
            }
            
            var line = this.line;
            while (!stream.lookEOF() && !Lang.isNewline(stream.look())) {
                found += stream.next();
            }
            if (!stream.lookEOF()) {
                found += stream.next();
            }
            if (this.keepComments) {
                tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
            }
            this.line++;
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_dbquote (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "\"") {
                // find terminator ("string" is a reserved word in Vala, hence "str")
                var str = stream.next();
                
                while (!stream.lookEOF()) {
                    if (stream.look() == "\\") {
                        if (Lang.isNewline(stream.look(1))) {
                            do {
                                stream.next();
                            } while (!stream.lookEOF() && Lang.isNewline(stream.look()));
                            str += "\\\n";
                        } else {
                            str += stream.next(2);
                        }
                    } else if (stream.look() == "\"") {
                        str += stream.next();
                        tokens.push(new Token(str, "STRN", "DOUBLE_QUOTE", this.line));
                        return true;
                    } else {
                        str += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_snquote (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "'") {
                // find terminator
                var str = stream.next();
                
                while (!stream.lookEOF()) {
                    if (stream.look() == "\\") { // escape sequence
                        str += stream.next(2);
                    } else if (stream.look() == "'") {
                        str += stream.next();
                        tokens.push(new Token(str, "STRN", "SINGLE_QUOTE", this.line));
                        return true;
                    } else {
                        str += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_numb (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "0" && stream.look(1) == "x") {
                return this.read_hex(stream, tokens);
            }
            
            var found = "";
            
            while (!stream.lookEOF() && Lang.isNumber(found + stream.look())) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            if (GLib.Regex.match_simple("^0[0-7]", found)) {
                tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
            } else {
                tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
            }
            return true;
        }
        /*t:
            requires("../lib/JSDOC/TextStream.js");
            requires("../lib/JSDOC/Token.js");
            requires("../lib/JSDOC/Lang.js");
            
            plan(3, "testing read_numb");
            
            //// setup
            var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
            var tr = new TokenReader();
            var tokens = tr.tokenize(new TextStream(src));
            
            var hexToken, octToken, decToken;
            for (var i = 0; i < tokens.length; i++) {
                if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
                if (tokens[i].name == "OCTAL") octToken = tokens[i];
                if (tokens[i].name == "DECIMAL") decToken = tokens[i];
            }
            ////
            
            is(decToken.data, "8.0", "decimal number is found in source.");
            is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
            is(octToken.data, "0777", "octal number is found in source.");
        */
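        /*
         * Rough Vala equivalent of the JavaScript test comment above (a sketch
         * only; it assumes TextStream can be constructed from a source string,
         * as in tokenize()'s doc comment, and that Token exposes .name/.data):
         *
         *   var tr = new TokenReader();
         *   try {
         *       var toks = tr.tokenize(new TextStream(
         *           "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}"));
         *       for (var i = 0; i < toks.length; i++) {
         *           if (toks.get(i).name == "HEX_DEC") { assert(toks.get(i).data == "0x20"); }
         *           if (toks.get(i).name == "OCTAL")   { assert(toks.get(i).data == "0777"); }
         *           if (toks.get(i).name == "DECIMAL") { assert(toks.get(i).data == "8.0");  }
         *       }
         *   } catch (TokenReader_Error e) {
         *       warning("%s", e.message);
         *   }
         */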

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_hex (TextStream stream, TokenArray tokens)
        {
            var found = stream.next(2);
            
            while (!stream.lookEOF()) {
                if (Lang.isHexDec(found) && !Lang.isHexDec(found + stream.look())) { // done
                    tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
                    return true;
                }
                found += stream.next();
            }
            return false;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_regx (TextStream stream, TokenArray tokens)
        {
            // a leading "/" only starts a regex literal if the previous significant
            // token could not end an expression - otherwise it is the division operator.
            Token? last = tokens.lastSym();
            if (
                stream.look() == "/"
                && 
                (
                    last == null // there is no last, the regex is the first symbol
                    || 
                    (
                           !last.is("NUMB")
                        && !last.is("NAME")
                        && !last.is("RIGHT_PAREN")
                        && !last.is("RIGHT_BRACKET")
                    )
                )
            ) {
                var regex = stream.next();
                
                while (!stream.lookEOF()) {
                    if (stream.look() == "\\") { // escape sequence
                        regex += stream.next(2);
                    } else if (stream.look() == "/") {
                        regex += stream.next();
                        
                        while (GLib.Regex.match_simple("[gmi]", stream.look())) {
                            regex += stream.next();
                        }
                        
                        tokens.push(new Token(regex, "REGX", "REGX", this.line));
                        return true;
                    } else {
                        regex += stream.next();
                    }
                }
                // error: unterminated regex
            }
            return false;
        }
    }
}