29deac8475a65b64c9b6cfbd3a52d45b92512cf6
[gnome.introspection-doc-generator] / JSDOC / TokenReader.vala
1 //<script type="text/javascript">
2
3  
4
5
6 //const Token   = imports.Token.Token;
7 //const Lang    = imports.Lang.Lang;
8
9 /**
10         @class Search a {@link JSDOC.TextStream} for language tokens.
11 */
12
13 namespace JSDOC {
14
15     public class TokenArray: Object {
16         
17         public Gee.ArrayList<Token> tokens;
18         public int length {
19             get { return this.tokens.size }
20         }
21         
22         public TokenArray()
23         {
24             this.items = new Gee.ArrayList<Token>();
25         }
26         
27         public Token? last() {
28             if (this.tokens > 0) {
29                 return this.tokens[this.tokens.length-1];
30             }
31             return null;
32         }
33         public Token? lastSym () {
34             for (var i = this.tokens.length-1; i >= 0; i--) {
35                 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
36                     return this.tokens.get(i);
37                 }
38             }
39             return null;
40         }
41         public void push (Token t) {
42             this.tokens.add(t);
43         }
44         public Token? pop ()
45         {
46             if (this.size > 0) {
47                 return this.tokens.remove_at(this.size-1);
48             }
49             return null;
50         }
51         
52         public Token get(int i) {
53             return this.tokens.get(i);
54         }
55     }
56
    /**
     * Error domain thrown by {@link TokenReader} when suspicious grammar
     * is found and ignoreBadGrammer is false.
     */
    errordomain TokenReader_Error {
            ArgumentError
    }
60     
61
62     public class TokenReader : Object
63     {
64         
65         
66         
67         /*
68          *
69          * I wonder if this will accept the prop: value, prop2 :value construxtor if we do not define one...
70          */
71         
72         /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
73         public bool collapseWhite = false, // only reduces white space...
74         /** @cfg {Boolean} keepDocs keep JSDOC comments **/
75         public bool keepDocs = true,
76         /** @cfg {Boolean} keepWhite keep White space **/
77         public bool keepWhite = false,
78         /** @cfg {Boolean} keepComments  keep all comments **/
79         public bool keepComments = false,
80         /** @cfg {Boolean} sepIdents seperate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
81         public bool sepIdents = false,
82         /** @cfg {String} filename name of file being parsed. **/
83         public string filename = "";
84         /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
85         public bool ignoreBadGrammer = false,
86         
87         
88         int line = 0;
89         
90         /**
91          * tokenize a stream
92          * @return {Array} of tokens
93          * 
94          * ts = new TextStream(File.read(str));
95          * tr = TokenReader({ keepComments : true, keepWhite : true });
96          * tr.tokenize(ts)
97          * 
98          */
99         public TokenArray tokenize(TextStream stream)
100         {
101             this.line =1;
102             var tokens = new TokenArray();
103            
104             bool eof;
105             while (!stream.lookEOF()) {
106                 
107                 
108                 if (this.read_mlcomment(stream, tokens)) continue;
109                 if (this.read_slcomment(stream, tokens)) continue;
110                 if (this.read_dbquote(stream, tokens))   continue;
111                 if (this.read_snquote(stream, tokens))   continue;
112                 if (this.read_regx(stream, tokens))      continue;
113                 if (this.read_numb(stream, tokens))      continue;
114                 if (this.read_punc(stream, tokens))      continue;
115                 if (this.read_newline(stream, tokens))   continue;
116                 if (this.read_space(stream, tokens))     continue;
117                 if (this.read_word(stream, tokens))      continue;
118                 
119                 // if execution reaches here then an error has happened
120                 tokens.push(
121                         new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
122                 );
123             }
124             
125             
126             
127             return tokens;
128         }
129
130         /**
131          * findPuncToken - find the id of a token (previous to current)
132          * need to back check syntax..
133          * 
134          * @arg {Array} tokens the array of tokens.
135          * @arg {String} token data (eg. '(')
136          * @arg {Number} offset where to start reading from
137          * @return {Number} position of token
138          */
139         public int findPuncToken(TokenArray tokens, string data, int n)
140         {
141             n = n || tokens.length -1;
142             var stack = 0;
143             while (n > -1) {
144                 
145                 if (!stack && tokens.get(n).data == data) {
146                     return n;
147                 }
148                 
149                 if (tokens.get(n).data  == ')' || tokens.get(n).data  == '}') {
150                     stack++;
151                     n--;
152                     continue;
153                 }
154                 if (stack && (tokens.get(n).data  == '{' || tokens.get(n).data  == '(')) {
155                     stack--;
156                     n--;
157                     continue;
158                 }
159                 
160                 
161                 n--;
162             }
163             return -1;
164         }
165         /**
166          * lastSym - find the last token symbol
167          * need to back check syntax..
168          * 
169          * @arg {Array} tokens the array of tokens.
170          * @arg {Number} offset where to start..
171          * @return {Token} the token
172          */
173         public Token lastSym(TokenArray tokens, int n)
174         {
175             for (var i = n-1; i >= 0; i--) {
176                 if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
177                     return tokens.get(i);
178                 }
179             }
180             return null;
181         }
182         
183          
184         
185         /**
186             @returns {Boolean} Was the token found?
187          */
188         public bool read_word (TokenStream stream, TokenArray tokens)
189         {
190             string found = "";
191             while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
192                 found += stream.next();
193             }
194             
195             if (found == "") {
196                 return false;
197             }
198             
199             var name = Lang.keyword(found);
200             if (name != null) {
201                 
202                 // look for "()return" ?? why ???
203                 var ls = tokens.lastSym();
204                 if (found == "return" && ls != null && ls.data == ")") {
205                     //Seed.print('@' + tokens.length);
206                     var n = this.findPuncToken(tokens, ")");
207                     //Seed.print(')@' + n);
208                     n = this.findPuncToken(tokens, "(", n-1);
209                     //Seed.print('(@' + n);
210                     
211                     var lt = this.lastSym(tokens, n);
212                     
213                     //print(JSON.stringify(lt));
214                     if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
215                         if (!this.ignoreBadGrammer) {
216                             throw new TokenReader_Error.ArgumentError(
217                                 this.filename + ":" + this.line + " Error - return found after )"
218                             );
219                         }
220                     }
221                     
222                     
223                     
224                 }
225                 
226                 tokens.push(new Token(found, "KEYW", name, this.line));
227                 return true;
228             }
229             
230             if (!this.sepIdents || found.indexOf('.') < 0 ) {
231                 tokens.push(new Token(found, "NAME", "NAME", this.line));
232                 return true;
233             }
234             var n = found.split('.');
235             var p = false;
236             foreach (unowned string nm in n) {
237                 if (p) {
238                     tokens.push(new Token('.', "PUNC", "DOT", this.line));
239                 }
240                 p=true;
241                 tokens.push(new Token(nm, "NAME", "NAME", this.line));
242             }
243             return true;
244                 
245
246         }
247
248         /**
249             @returns {Boolean} Was the token found?
250          */
251         public bool read_punc (TokenStream stream, TokenArray tokens)
252         {
253             string found = "";
254             var name;
255             while (!stream.lookEOF() && Lang.punc(found + stream.look()).length > 0) {
256                 found += stream.next();
257             }
258             
259             
260             if (found === "") {
261                 return false;
262             }
263             
264             var ls = tokens.lastSym();
265             
266             if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
267                 //print("Error - comma found before " + found);
268                 //print(JSON.stringify(tokens.lastSym(), null,4));
269                 if (this.ignoreBadGrammer) {
270                     print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
271                 } else {
272                     throw new TokenReader_Error.ArgumentError(
273                                 this.filename + ":" + this.line + "  comma found before " + found
274                   
275                     );
276                      
277                 }
278             }
279             
280             tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
281             return true;
282             
283         } 
284
285         /**
286             @returns {Boolean} Was the token found?
287          */
288         public bool read_space  (TokenStream stream, TokenArray tokens)
289         {
290             var found = "";
291             
292             while (!stream.lookEOF() && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
293                 found += stream.next();
294             }
295             
296             if (found === "") {
297                 return false;
298             }
299             //print("WHITE = " + JSON.stringify(found));
300             
301              
302             if (this.collapseWhite) {
303                 found = " "; // this might work better if it was a '\n' ???
304             }
305             if (this.keepWhite) {
306                 tokens.push(new Token(found, "WHIT", "SPACE", this.line));
307             }
308             return true;
309         
310         }
311
312         /**
313             @returns {Boolean} Was the token found?
314          */
315         public bool read_newline  (TokenStream stream, TokenArray tokens)
316             var found = "";
317             var line = this.line;
318             while (!stream.lookEOF() && Lang.isNewline(stream.look())) {
319                 this.line++;
320                 found += stream.next();
321             }
322             
323             if (found === "") {
324                 return false;
325             }
326             
327             // if we found a new line, then we could check if previous character was a ';' - if so we can drop it.
328             // otherwise generally keep it.. in which case it should reduce our issue with stripping new lines..
329            
330             
331             //this.line++;
332             if (this.collapseWhite) {
333                 found = "\n"; // reduces multiple line breaks into a single one...
334             }
335             
336             if (this.keepWhite) {
337                 var last = tokens.pop();
338                 if (last != null && last.name != "WHIT") {
339                     tokens.push(last);
340                 }
341                 // replaces last new line... 
342                 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
343             }
344             return true;
345         },
346
347         /**
348             @returns {Boolean} Was the token found?
349          */
350         public bool read_mlcomment  (TokenStream stream, TokenArray tokens)
351         {
352             if (stream.look() != "/") {
353                 return false;
354             }
355             if (stream.look(1) != "*") {
356                 return false;
357             }
358             var found = stream.next(2);
359             var c = '';
360             var line = this.line;
361             while (!stream.lookEOF() && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
362                 c = stream.next();
363                 if (c == "\n") {
364                     this.line++;
365                 }
366                 found += c;
367             }
368             
369             // to start doclet we allow /** or /*** but not /**/ or /****
370             //if (found.length /^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) {
371             if ((this.keepDocs && found.length > 4 && found.index_of("/**") == 0 && found[3] != "/") {
372                 tokens.push(new Token(found, "COMM", "JSDOC", this.line));
373             } else if (this.keepComments) {
374                 tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
375             }
376             return true;
377         
378         },
379
380         /**
381             @returns {Boolean} Was the token found?
382          */
383         read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
384             var found;
385             if (
386                 (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
387                 || 
388                 (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
389             ) {
390                 var line = this.line;
391                 while (!stream.look().eof && !Lang.isNewline(stream.look())) {
392                     found += stream.next();
393                 }
394                 if (!stream.look().eof) {
395                     found += stream.next();
396                 }
397                 if (this.keepComments) {
398                     tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
399                 }
400                 this.line++;
401                 return true;
402             }
403             return false;
404         },
405
406         /**
407             @returns {Boolean} Was the token found?
408          */
409         read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
410             if (stream.look() == "\"") {
411                 // find terminator
412                 var string = stream.next();
413                 
414                 while (!stream.look().eof) {
415                     if (stream.look() == "\\") {
416                         if (Lang.isNewline(stream.look(1))) {
417                             do {
418                                 stream.next();
419                             } while (!stream.look().eof && Lang.isNewline(stream.look()));
420                             string += "\\\n";
421                         }
422                         else {
423                             string += stream.next(2);
424                         }
425                     }
426                     else if (stream.look() == "\"") {
427                         string += stream.next();
428                         tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
429                         return true;
430                     }
431                     else {
432                         string += stream.next();
433                     }
434                 }
435             }
436             return false; // error! unterminated string
437         },
438
439         /**
440             @returns {Boolean} Was the token found?
441          */
442         read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
443             if (stream.look() == "'") {
444                 // find terminator
445                 var string = stream.next();
446                 
447                 while (!stream.look().eof) {
448                     if (stream.look() == "\\") { // escape sequence
449                         string += stream.next(2);
450                     }
451                     else if (stream.look() == "'") {
452                         string += stream.next();
453                         tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
454                         return true;
455                     }
456                     else {
457                         string += stream.next();
458                     }
459                 }
460             }
461             return false; // error! unterminated string
462         },
463
464         /**
465             @returns {Boolean} Was the token found?
466          */
467         read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
468             if (stream.look() === "0" && stream.look(1) == "x") {
469                 return this.read_hex(stream, tokens);
470             }
471             
472             var found = "";
473             
474             while (!stream.look().eof && Lang.isNumber(found+stream.look())){
475                 found += stream.next();
476             }
477             
478             if (found === "") {
479                 return false;
480             }
481             else {
482                 if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
483                 else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
484                 return true;
485             }
486         },
487         /*t:
488             requires("../lib/JSDOC/TextStream.js");
489             requires("../lib/JSDOC/Token.js");
490             requires("../lib/JSDOC/Lang.js");
491             
492             plan(3, "testing read_numb");
493             
494             //// setup
495             var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
496             var tr = new TokenReader();
497             var tokens = tr.tokenize(new TextStream(src));
498             
499             var hexToken, octToken, decToken;
500             for (var i = 0; i < tokens.length; i++) {
501                 if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
502                 if (tokens[i].name == "OCTAL") octToken = tokens[i];
503                 if (tokens[i].name == "DECIMAL") decToken = tokens[i];
504             }
505             ////
506             
507             is(decToken.data, "8.0", "decimal number is found in source.");
508             is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
509             is(octToken.data, "0777", "octal number is found in source.");
510         */
511
512         /**
513             @returns {Boolean} Was the token found?
514          */
515         read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
516             var found = stream.next(2);
517             
518             while (!stream.look().eof) {
519                 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
520                     tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
521                     return true;
522                 }
523                 else {
524                     found += stream.next();
525                 }
526             }
527             return false;
528         },
529
530         /**
531             @returns {Boolean} Was the token found?
532          */
533         read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
534             var last;
535             if (
536                 stream.look() == "/"
537                 && 
538                 (
539                     
540                     (
541                         !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
542                         || 
543                         (
544                                !last.is("NUMB")
545                             && !last.is("NAME")
546                             && !last.is("RIGHT_PAREN")
547                             && !last.is("RIGHT_BRACKET")
548                         )
549                     )
550                 )
551             ) {
552                 var regex = stream.next();
553                 
554                 while (!stream.look().eof) {
555                     if (stream.look() == "\\") { // escape sequence
556                         regex += stream.next(2);
557                     }
558                     else if (stream.look() == "/") {
559                         regex += stream.next();
560                         
561                         while (/[gmi]/.test(stream.look())) {
562                             regex += stream.next();
563                         }
564                         
565                         tokens.push(new Token(regex, "REGX", "REGX", this.line));
566                         return true;
567                     }
568                     else {
569                         regex += stream.next();
570                     }
571                 }
572                 // error: unterminated regex
573             }
574             return false;
575         }
576 });