// JSDOC/TokenReader.vala
//<script type="text/javascript">

//const Token   = imports.Token.Token;
//const Lang    = imports.Lang.Lang;

/**
    @class Search a {@link JSDOC.TextStream} for language tokens.
*/

namespace JSDOC {

    public class TokenArray: Object {
        
        public Gee.ArrayList<Token> tokens;

        public int length {
            get { return this.tokens.size; }
        }
        
        public TokenArray()
        {
            this.tokens = new Gee.ArrayList<Token>();
        }
        
        public Token? last() {
            if (this.tokens.size > 0) {
                return this.tokens.get(this.tokens.size - 1);
            }
            return null;
        }

        public Token? lastSym () {
            for (var i = this.tokens.size - 1; i >= 0; i--) {
                if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM"))) {
                    return this.tokens.get(i);
                }
            }
            return null;
        }

        public void push (Token t) {
            this.tokens.add(t);
        }

        public Token? pop ()
        {
            if (this.tokens.size > 0) {
                return this.tokens.remove_at(this.tokens.size - 1);
            }
            return null;
        }
        
        public Token get(int i) {
            return this.tokens.get(i);
        }
    }
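
    /*
     * Illustrative usage sketch (not part of the original file): TokenArray is a
     * thin wrapper around Gee.ArrayList<Token>, used as a stack of emitted tokens.
     * Roughly, using Token's constructor the same way the reader below does:
     *
     *   var ta = new TokenArray();
     *   ta.push(new Token("(", "PUNC", "LEFT_PAREN", 1));
     *   ta.push(new Token(" ", "WHIT", "SPACE", 1));
     *   var sym = ta.lastSym();  // skips WHIT/COMM tokens, returns the "(" token
     *   var top = ta.pop();      // removes and returns the SPACE token
     *   // ta.length == 1
     */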

    errordomain TokenReader_Error {
        ArgumentError
    }
    

    public class TokenReader : Object
    {
        
        /*
         * I wonder if this will accept the  prop: value, prop2: value  constructor
         * if we do not define one...
         */
        
        /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
        public bool collapseWhite = false; // only reduces white space...
        /** @cfg {Boolean} keepDocs keep JSDOC comments **/
        public bool keepDocs = true;
        /** @cfg {Boolean} keepWhite keep White space **/
        public bool keepWhite = false;
        /** @cfg {Boolean} keepComments keep all comments **/
        public bool keepComments = false;
        /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
        public bool sepIdents = false;
        /** @cfg {String} filename name of file being parsed. **/
        public string filename = "";
        /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
        public bool ignoreBadGrammer = false;
        
        
        int line = 0;
        
        /**
         * tokenize a stream
         * @return {Array} of tokens
         * 
         * ts = new TextStream(File.read(str));
         * tr = TokenReader({ keepComments : true, keepWhite : true });
         * tr.tokenize(ts)
         * 
         */
        public TokenArray tokenize(TextStream stream) throws TokenReader_Error
        {
            this.line = 1;
            var tokens = new TokenArray();
           
            while (!stream.lookEOF()) {
                
                if (this.read_mlcomment(stream, tokens)) continue;
                if (this.read_slcomment(stream, tokens)) continue;
                if (this.read_dbquote(stream, tokens))   continue;
                if (this.read_snquote(stream, tokens))   continue;
                if (this.read_regx(stream, tokens))      continue;
                if (this.read_numb(stream, tokens))      continue;
                if (this.read_punc(stream, tokens))      continue;
                if (this.read_newline(stream, tokens))   continue;
                if (this.read_space(stream, tokens))     continue;
                if (this.read_word(stream, tokens))      continue;
                
                // if execution reaches here then an error has happened
                tokens.push(
                    new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
                );
            }
            
            return tokens;
        }
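
        /*
         * Illustrative usage sketch (not part of the original file): how a caller
         * would be expected to drive tokenize(), assuming TextStream takes the file
         * contents as a string, as the test block further down does:
         *
         *   var tr = new TokenReader();
         *   tr.keepComments = true;
         *   tr.keepWhite = true;
         *   try {
         *       var toks = tr.tokenize(new TextStream("var a = b.c + 1;"));
         *       for (var i = 0; i < toks.length; i++) {
         *           print("%s : %s\n", toks.get(i).type, toks.get(i).data);
         *       }
         *   } catch (TokenReader_Error e) {
         *       print("parse problem: %s\n", e.message);
         *   }
         */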

        /**
         * findPuncToken - find the index of a token (previous to current)
         * need to back check syntax..
         * 
         * @arg {Array} tokens the array of tokens.
         * @arg {String} data the token data to look for (eg. '(')
         * @arg {Number} n offset where to start reading from (0 = start from the end)
         * @return {Number} position of token, or -1 if not found
         */
        public int findPuncToken(TokenArray tokens, string data, int n = 0)
        {
            n = (n == 0) ? tokens.length - 1 : n;
            var stack = 0;
            while (n > -1) {
                
                if (stack == 0 && tokens.get(n).data == data) {
                    return n;
                }
                
                if (tokens.get(n).data == ")" || tokens.get(n).data == "}") {
                    stack++;
                    n--;
                    continue;
                }
                if (stack > 0 && (tokens.get(n).data == "{" || tokens.get(n).data == "(")) {
                    stack--;
                    n--;
                    continue;
                }
                
                n--;
            }
            return -1;
        }
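
        /*
         * Illustrative sketch (not in the original): given tokens whose data is
         *
         *   a  (  b  (  c  )  d  )        (indexes 0..7)
         *
         * findPuncToken(tokens, "(", 6) walks backwards from index 6: the ")" at
         * index 5 goes onto the stack, it is cancelled by the "(" at index 3, and
         * the matching "(" at index 1 is returned. read_word() uses this pairing
         * to find what keyword, if any, sits in front of a ")...return" sequence.
         */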
        /**
         * lastSym - find the last token symbol
         * need to back check syntax..
         * 
         * @arg {Array} tokens the array of tokens.
         * @arg {Number} n offset where to start..
         * @return {Token} the token, or null if none found
         */
        public Token? lastSym(TokenArray tokens, int n)
        {
            for (var i = n - 1; i >= 0; i--) {
                if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
                    return tokens.get(i);
                }
            }
            return null;
        }
        
        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_word (TextStream stream, TokenArray tokens) throws TokenReader_Error
        {
            string found = "";
            while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            
            var name = Lang.keyword(found);
            if (name != null) {
                
                // look for "()return" ?? why ???
                var ls = tokens.lastSym();
                if (found == "return" && ls != null && ls.data == ")") {
                    //Seed.print('@' + tokens.length);
                    var n = this.findPuncToken(tokens, ")");
                    //Seed.print(')@' + n);
                    n = this.findPuncToken(tokens, "(", n - 1);
                    //Seed.print('(@' + n);
                    
                    var lt = this.lastSym(tokens, n);
                    
                    //print(JSON.stringify(lt));
                    if (lt == null || lt.type != "KEYW" || (lt.name != "IF" && lt.name != "WHILE")) {
                        if (!this.ignoreBadGrammer) {
                            throw new TokenReader_Error.ArgumentError(
                                this.filename + ":" + this.line.to_string() + " Error - return found after )"
                            );
                        }
                    }
                    
                }
                
                tokens.push(new Token(found, "KEYW", name, this.line));
                return true;
            }
            
            if (!this.sepIdents || found.index_of(".") < 0) {
                tokens.push(new Token(found, "NAME", "NAME", this.line));
                return true;
            }
            
            // sepIdents: split  a.b.c  into NAME / DOT / NAME / DOT / NAME tokens
            var parts = found.split(".");
            var p = false;
            foreach (unowned string nm in parts) {
                if (p) {
                    tokens.push(new Token(".", "PUNC", "DOT", this.line));
                }
                p = true;
                tokens.push(new Token(nm, "NAME", "NAME", this.line));
            }
            return true;
        }
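
        /*
         * Illustrative sketch (not in the original): with sepIdents enabled the
         * word "a.b.c" is emitted as five tokens - NAME "a", PUNC DOT, NAME "b",
         * PUNC DOT, NAME "c" - instead of one NAME token, which is what the
         * sepIdents @cfg comment above describes.
         *
         *   var tr = new TokenReader();
         *   tr.sepIdents = true;
         *   var toks = tr.tokenize(new TextStream("a.b.c"));  // may throw TokenReader_Error
         *   // toks.length == 5
         */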

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_punc (TextStream stream, TokenArray tokens) throws TokenReader_Error
        {
            string found = "";
            while (!stream.lookEOF() && Lang.punc(found + stream.look()).length > 0) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            
            var ls = tokens.lastSym();
            
            if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
                //print("Error - comma found before " + found);
                //print(JSON.stringify(tokens.lastSym(), null,4));
                if (this.ignoreBadGrammer) {
                    print("\n%s:%d Error - comma found before %s", this.filename, this.line, found);
                } else {
                    throw new TokenReader_Error.ArgumentError(
                        this.filename + ":" + this.line.to_string() + " comma found before " + found
                    );
                }
            }
            
            tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_space (TextStream stream, TokenArray tokens)
        {
            var found = "";
            
            while (!stream.lookEOF() && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            //print("WHITE = " + JSON.stringify(found));
            
            if (this.collapseWhite) {
                found = " "; // this might work better if it was a '\n' ???
            }
            if (this.keepWhite) {
                tokens.push(new Token(found, "WHIT", "SPACE", this.line));
            }
            return true;
        }
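
        /*
         * Illustrative note (not in the original): for an input run of four spaces,
         * read_space emits one WHIT/SPACE token when keepWhite is set - its data is
         * the original four spaces, or a single " " if collapseWhite is also set.
         * When keepWhite is false no token is emitted, although the characters are
         * still consumed from the stream.
         */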

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_newline (TextStream stream, TokenArray tokens)
        {
            var found = "";
            var line = this.line;
            while (!stream.lookEOF() && Lang.isNewline(stream.look())) {
                this.line++;
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            
            // if we found a new line, then we could check if the previous character was a ';' - if so we can drop it.
            // otherwise generally keep it.. in which case it should reduce our issue with stripping new lines..
            
            //this.line++;
            if (this.collapseWhite) {
                found = "\n";
            }
            if (this.keepWhite) {
                // drop a pending whitespace token so we do not emit two WHIT tokens in a row
                var last = tokens.pop();
                if (last != null && last.name != "WHIT") {
                    tokens.push(last);
                }
                
                tokens.push(new Token(found, "WHIT", "NEWLINE", line));
            }
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_mlcomment (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "/" && stream.look(1) == "*") {
                var found = stream.next(2);
                var c = "";
                var line = this.line;
                while (!stream.lookEOF() && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
                    c = stream.next();
                    if (c == "\n") {
                        this.line++;
                    }
                    found += c;
                }
                
                // to start a doclet we allow /** or /*** but not /**/ or /****
                if (GLib.Regex.match_simple("^/\\*\\*([^/]|\\*[^*])", found) && this.keepDocs) {
                    tokens.push(new Token(found, "COMM", "JSDOC", this.line));
                } else if (this.keepComments) {
                    tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
                }
                return true;
            }
            return false;
        }
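
        // Illustrative sketch (not in the original): how the doclet rule above
        // classifies a few comments, assuming keepDocs and keepComments are set:
        //
        //   /** a doclet */   ->  COMM / JSDOC             (two stars start a doclet)
        //   /*** a doclet */  ->  COMM / JSDOC             (a third star is still allowed)
        //   /**/              ->  COMM / MULTI_LINE_COMM   (an empty doclet is rejected)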

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_slcomment (TextStream stream, TokenArray tokens)
        {
            string found;
            if (stream.look() == "/" && stream.look(1) == "/") {
                found = stream.next(2);
            } else if (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-") {
                // legacy "<!--" comments are treated like single line comments
                found = stream.next(4);
            } else {
                return false;
            }
            
            var line = this.line;
            while (!stream.lookEOF() && !Lang.isNewline(stream.look())) {
                found += stream.next();
            }
            if (!stream.lookEOF()) {
                found += stream.next();
            }
            if (this.keepComments) {
                tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
            }
            this.line++;
            return true;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_dbquote (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "\"") {
                // find terminator
                var str = stream.next();
                
                while (!stream.lookEOF()) {
                    if (stream.look() == "\\") {
                        if (Lang.isNewline(stream.look(1))) {
                            // a backslash-newline continuation - collapse it to "\" + newline
                            do {
                                stream.next();
                            } while (!stream.lookEOF() && Lang.isNewline(stream.look()));
                            str += "\\\n";
                        } else {
                            str += stream.next(2);
                        }
                    } else if (stream.look() == "\"") {
                        str += stream.next();
                        tokens.push(new Token(str, "STRN", "DOUBLE_QUOTE", this.line));
                        return true;
                    } else {
                        str += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_snquote (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "'") {
                // find terminator
                var str = stream.next();
                
                while (!stream.lookEOF()) {
                    if (stream.look() == "\\") { // escape sequence
                        str += stream.next(2);
                    } else if (stream.look() == "'") {
                        str += stream.next();
                        tokens.push(new Token(str, "STRN", "SINGLE_QUOTE", this.line));
                        return true;
                    } else {
                        str += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_numb (TextStream stream, TokenArray tokens)
        {
            if (stream.look() == "0" && stream.look(1) == "x") {
                return this.read_hex(stream, tokens);
            }
            
            var found = "";
            
            while (!stream.lookEOF() && Lang.isNumber(found + stream.look())) {
                found += stream.next();
            }
            
            if (found == "") {
                return false;
            }
            
            if (GLib.Regex.match_simple("^0[0-7]", found)) {
                tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
            } else {
                tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
            }
            return true;
        }
        /*t:
            requires("../lib/JSDOC/TextStream.js");
            requires("../lib/JSDOC/Token.js");
            requires("../lib/JSDOC/Lang.js");
            
            plan(3, "testing read_numb");
            
            //// setup
            var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
            var tr = new TokenReader();
            var tokens = tr.tokenize(new TextStream(src));
            
            var hexToken, octToken, decToken;
            for (var i = 0; i < tokens.length; i++) {
                if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
                if (tokens[i].name == "OCTAL") octToken = tokens[i];
                if (tokens[i].name == "DECIMAL") decToken = tokens[i];
            }
            ////
            
            is(decToken.data, "8.0", "decimal number is found in source.");
            is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
            is(octToken.data, "0777", "octal number is found in source.");
        */

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_hex (TextStream stream, TokenArray tokens)
        {
            var found = stream.next(2);
            
            while (!stream.lookEOF()) {
                if (Lang.isHexDec(found) && !Lang.isHexDec(found + stream.look())) { // done
                    tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
                    return true;
                }
                
                found += stream.next();
            }
            return false;
        }

        /**
            @returns {Boolean} Was the token found?
         */
        public bool read_regx (TextStream stream, TokenArray tokens)
        {
            if (stream.look() != "/") {
                return false;
            }
            // a slash only starts a regex literal when the previous symbol
            // could not have ended an expression
            var last = tokens.lastSym();
            if (last != null
                && (last.is("NUMB") || last.is("NAME") || last.is("RIGHT_PAREN") || last.is("RIGHT_BRACKET"))) {
                return false;
            }
            
            var regex = stream.next();
            
            while (!stream.lookEOF()) {
                if (stream.look() == "\\") { // escape sequence
                    regex += stream.next(2);
                } else if (stream.look() == "/") {
                    regex += stream.next();
                    
                    while (GLib.Regex.match_simple("[gmi]", stream.look())) {
                        regex += stream.next();
                    }
                    
                    tokens.push(new Token(regex, "REGX", "REGX", this.line));
                    return true;
                } else {
                    regex += stream.next();
                }
            }
            // error: unterminated regex
            return false;
        }
    }
}