JSDOC/TokenReader.js
//<script type="text/javascript">


XObject = imports.XObject.XObject;
console = imports.console.console;


Token   = imports.Token.Token;
Lang    = imports.Lang.Lang;

/**
        @class Search a {@link JSDOC.TextStream} for language tokens.
*/
TokenReader = XObject.define(
    function(o) {
        
        XObject.extend(this, o || {});
        
    },
    Object,
    {
        /** @cfg {Boolean} collapseWhite merge runs of whitespace/comments into a single token **/
        collapseWhite : false, // only reduces whitespace...
        /** @cfg {Boolean} keepDocs keep JSDOC comments **/
        keepDocs : true,
        /** @cfg {Boolean} keepWhite keep whitespace **/
        keepWhite : false,
        /** @cfg {Boolean} keepComments keep all comments **/
        keepComments : false,
        /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c']) **/
        sepIdents : false,
        /** @cfg {String} filename name of the file being parsed. **/
        filename : '',
        /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
        ignoreBadGrammer : false,
        /**
         * Tokenize a stream.
         * @return {Array} of tokens
         * 
         * ts = new TextStream(File.read(str));
         * tr = new TokenReader({ keepComments : true, keepWhite : true });
         * tr.tokenize(ts);
         * 
         */
        tokenize : function(/**JSDOC.TextStream*/stream) {
            this.line = 1;
            var tokens = [];
            /**@ignore*/
            tokens.last    = function() { return tokens[tokens.length-1]; };
            /**@ignore*/
            tokens.lastSym = function() {
                for (var i = tokens.length-1; i >= 0; i--) {
                    if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
                }
            };

            while (!stream.look().eof) {
                if (this.read_mlcomment(stream, tokens)) continue;
                if (this.read_slcomment(stream, tokens)) continue;
                if (this.read_dbquote(stream, tokens))   continue;
                if (this.read_snquote(stream, tokens))   continue;
                if (this.read_regx(stream, tokens))      continue;
                if (this.read_numb(stream, tokens))      continue;
                if (this.read_punc(stream, tokens))      continue;
                if (this.read_newline(stream, tokens))   continue;
                if (this.read_space(stream, tokens))     continue;
                if (this.read_word(stream, tokens))      continue;
                
                // if execution reaches here then an error has happened
                tokens.push(new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line));
            }
            
            return tokens;
        },
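
        /* Usage sketch (illustrative only - the TextStream/File import paths
         * below are assumptions, not part of this module; adjust them to the
         * local setup):
         *
         *   TextStream = imports.TextStream.TextStream;
         *   File       = imports.File.File;
         *
         *   var tr = new TokenReader({ keepDocs : true, keepWhite : false });
         *   var tokens = tr.tokenize(new TextStream(File.read('example.js')));
         *   tokens.forEach(function(t) {
         *       // each entry is a Token with type (eg. "KEYW", "NAME"), name
         *       // (eg. "JSDOC", "NEWLINE"), data (the raw text) and a line number
         *       Seed.print(t.type + ' : ' + t.name + ' : ' + t.data);
         *   });
         */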

        /**
         * findPuncToken - find the index of a punctuation token (previous to the current position)
         * used to back-check syntax.
         * 
         * @arg {Array} tokens the array of tokens.
         * @arg {String} data the token data to look for (eg. '(')
         * @arg {Number} n offset to start reading back from (defaults to the end of the array)
         * @return {Number} position of the token, or -1 if it is not found
         */
        findPuncToken : function(tokens, data, n) {
            n = n || tokens.length - 1;
            var stack = 0;
            while (n > -1) {
                
                if (!stack && tokens[n].data == data) {
                    return n;
                }
                
                if (tokens[n].data == ')' || tokens[n].data == '}') {
                    stack++;
                    n--;
                    continue;
                }
                if (stack && (tokens[n].data == '{' || tokens[n].data == '(')) {
                    stack--;
                    n--;
                    continue;
                }
                
                n--;
            }
            return -1;
        },
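
        /* Example (sketch): given the tokens for "if (a == (b)) return", calling
         *
         *   var close = this.findPuncToken(tokens, ')');            // outermost ')'
         *   var open  = this.findPuncToken(tokens, '(', close - 1); // its matching '('
         *
         * walks backwards with the stack counter skipping the inner "(b)" pair,
         * so 'open' is the '(' of the if-condition rather than the nested one.
         */
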
        /**
         * lastSym - find the last token symbol (skipping whitespace and comments)
         * used to back-check syntax.
         * 
         * @arg {Array} tokens the array of tokens.
         * @arg {Number} n offset to start looking back from
         * @return {Token} the token, or undefined if none is found
         */
        lastSym : function(tokens, n) {
            for (var i = n-1; i >= 0; i--) {
                if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
            }
        },
        
        /**
            @returns {Boolean} Was the token found?
         */
        read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = "";
            while (!stream.look().eof && Lang.isWordChar(stream.look())) {
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            
            var name;
            if ((name = Lang.keyword(found))) {
                if (found == 'return' && tokens.lastSym().data == ')') {
                    //Seed.print('@' + tokens.length);
                    var n = this.findPuncToken(tokens, ')');
                    //Seed.print(')@' + n);
                    n = this.findPuncToken(tokens, '(', n-1);
                    //Seed.print('(@' + n);
                    
                    var lt = this.lastSym(tokens, n);
                    //Seed.print(JSON.stringify(lt));
                    // only allow 'return' directly after the ')' of an if/while condition
                    if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < 0) {
                        throw {
                            name : "ArgumentError", 
                            message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
                        };
                    }
                    
                }
                
                tokens.push(new Token(found, "KEYW", name, this.line));
                return true;
            }
            if (!this.sepIdents || found.indexOf('.') < 0) {
                tokens.push(new Token(found, "NAME", "NAME", this.line));
                return true;
            }
            var n = found.split('.');
            var p = false;
            var _this = this;
            n.forEach(function(nm) {
                if (p) {
                    tokens.push(new Token('.', "PUNC", "DOT", _this.line));
                }
                p = true;
                tokens.push(new Token(nm, "NAME", "NAME", _this.line));
            });
            return true;
        },
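
        /* Sketch of the 'return found after )' check above: code such as
         *
         *   doSomething(x) return;
         *
         * (a closing parenthesis that is not an if/while condition, directly
         * followed by 'return') is rejected with an ArgumentError, while
         *
         *   if (x) return;
         *
         * passes, because the symbol before the matching '(' is the IF keyword.
         */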

        /**
            @returns {Boolean} Was the token found?
         */
        read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = "";
            var name;
            while (!stream.look().eof && Lang.punc(found+stream.look())) {
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            
            if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
                //print("Error - comma found before " + found);
                //print(JSON.stringify(tokens.lastSym(), null,4));
                throw {
                    name : "ArgumentError", 
                    message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
                };
            }
            
            tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
            return true;
        },
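
        /* Sketch: the lastSym() check above flags a trailing comma, eg.
         *
         *   var o = { a : 1, b : 2, };
         *   var a = [ 1, 2, 3, ];
         *
         * both raise "comma found before }" / "comma found before ]" - trailing
         * commas are the sort of thing that breaks older JavaScript engines and
         * compressed output, so they are caught here at tokenize time.
         */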

        /**
            @returns {Boolean} Was the token found?
         */
        read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = "";
            
            while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            //print("WHITE = " + JSON.stringify(found));
            if (this.collapseWhite) found = " ";
            if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
            return true;
            
        },

        /**
            @returns {Boolean} Was the token found?
         */
        read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = "";
            var line = this.line;
            while (!stream.look().eof && Lang.isNewline(stream.look())) {
                this.line++;
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            //this.line++;
            if (this.collapseWhite) {
                found = "\n";
            }
            if (this.keepWhite) {
                // drop a whitespace token that immediately precedes the newline,
                // so the NEWLINE token wins
                var last = tokens.pop();
                if (last && !last.is("WHIT")) {
                    tokens.push(last);
                }
                
                tokens.push(new Token(found, "WHIT", "NEWLINE", line));
            }
            return true;
        },
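
        /* Whitespace handling sketch: with keepWhite on, a run of spaces that
         * sits directly before a newline is dropped in favour of the NEWLINE
         * token, so "a   \n b" comes out roughly as
         *
         *   NAME 'a', WHIT "\n", WHIT ' ', NAME 'b'
         *
         * and with collapseWhite also set, a blank-line run such as "a;\n\n\nb"
         * keeps only a single "\n" NEWLINE token.
         */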

        /**
            @returns {Boolean} Was the token found?
         */
        read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() == "/" && stream.look(1) == "*") {
                var found = stream.next(2);
                var c = '';
                var line = this.line;
                while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
                    c = stream.next();
                    if (c == "\n") this.line++;
                    found += c;
                }
                
                // to start doclet we allow /** or /*** but not /**/ or /****
                if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
                else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
                return true;
            }
            return false;
        },
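
        // Doclet detection sketch (assuming keepDocs is left on): an opening of
        // "/**" or "/***" produces a COMM/JSDOC token; a plain "/*" opening is a
        // COMM/MULTI_LINE_COMM token and is only kept when keepComments is set;
        // the empty "/**/" fails the regex above and is treated as a plain comment.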

        /**
            @returns {Boolean} Was the token found?
         */
        read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found;
            if (
                (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
                || 
                (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
            ) {
                var line = this.line;
                while (!stream.look().eof && !Lang.isNewline(stream.look())) {
                    found += stream.next();
                }
                if (!stream.look().eof) {
                    found += stream.next();
                }
                if (this.keepComments) {
                    tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
                }
                this.line++;
                return true;
            }
            return false;
        },

        /**
            @returns {Boolean} Was the token found?
         */
        read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() == "\"") {
                // find terminator
                var string = stream.next();
                
                while (!stream.look().eof) {
                    if (stream.look() == "\\") {
                        if (Lang.isNewline(stream.look(1))) {
                            do {
                                stream.next();
                            } while (!stream.look().eof && Lang.isNewline(stream.look()));
                            string += "\\\n";
                        }
                        else {
                            string += stream.next(2);
                        }
                    }
                    else if (stream.look() == "\"") {
                        string += stream.next();
                        tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
                        return true;
                    }
                    else {
                        string += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        },
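
        // Sketch: a backslash-newline pair inside a double-quoted string (the old
        // line-continuation trick) is swallowed and re-emitted as "\" + "\n", so
        //
        //   var s = "line one \
        //            line two";
        //
        // still tokenizes as a single STRN/DOUBLE_QUOTE token.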

        /**
            @returns {Boolean} Was the token found?
         */
        read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() == "'") {
                // find terminator
                var string = stream.next();
                
                while (!stream.look().eof) {
                    if (stream.look() == "\\") { // escape sequence
                        string += stream.next(2);
                    }
                    else if (stream.look() == "'") {
                        string += stream.next();
                        tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
                        return true;
                    }
                    else {
                        string += stream.next();
                    }
                }
            }
            return false; // error! unterminated string
        },

        /**
            @returns {Boolean} Was the token found?
         */
        read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
            if (stream.look() === "0" && stream.look(1) == "x") {
                return this.read_hex(stream, tokens);
            }
            
            var found = "";
            
            while (!stream.look().eof && Lang.isNumber(found+stream.look())){
                found += stream.next();
            }
            
            if (found === "") {
                return false;
            }
            else {
                if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
                else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
                return true;
            }
        },
        /*t:
            requires("../lib/JSDOC/TextStream.js");
            requires("../lib/JSDOC/Token.js");
            requires("../lib/JSDOC/Lang.js");
            
            plan(3, "testing read_numb");
            
            //// setup
            var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
            var tr = new TokenReader();
            var tokens = tr.tokenize(new TextStream(src));
            
            var hexToken, octToken, decToken;
            for (var i = 0; i < tokens.length; i++) {
                if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
                if (tokens[i].name == "OCTAL") octToken = tokens[i];
                if (tokens[i].name == "DECIMAL") decToken = tokens[i];
            }
            ////
            
            is(decToken.data, "8.0", "decimal number is found in source.");
            is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
            is(octToken.data, "0777", "octal number is found in source.");
        */

        /**
            @returns {Boolean} Was the token found?
         */
        read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
            var found = stream.next(2);
            
            while (!stream.look().eof) {
                if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
                    tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
                    return true;
                }
                else {
                    found += stream.next();
                }
            }
            return false;
        },

        /**
            @returns {Boolean} Was the token found?
         */
        read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
            var last;
            if (
                stream.look() == "/"
                && 
                (
                    
                    (
                        !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
                        || 
                        (
                               !last.is("NUMB")
                            && !last.is("NAME")
                            && !last.is("RIGHT_PAREN")
                            && !last.is("RIGHT_BRACKET")
                        )
                    )
                )
            ) {
                var regex = stream.next();
                
                while (!stream.look().eof) {
                    if (stream.look() == "\\") { // escape sequence
                        regex += stream.next(2);
                    }
                    else if (stream.look() == "/") {
                        regex += stream.next();
                        
                        while (/[gmi]/.test(stream.look())) {
                            regex += stream.next();
                        }
                        
                        tokens.push(new Token(regex, "REGX", "REGX", this.line));
                        return true;
                    }
                    else {
                        regex += stream.next();
                    }
                }
                // error: unterminated regex
            }
            return false;
        }
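
        /* Sketch: the lastSym() guard above is what tells a regex literal apart
         * from the division operator - after a NAME, NUMB, ')' or ']' token a
         * slash is treated as division, otherwise it opens a regex.  So in
         *
         *   x = a / b;            the '/' follows NAME 'a'  -> read as PUNC
         *   x = s.match(/a/g);    the '/' follows '('       -> read as REGX "/a/g"
         *
         * only the second slash produces a REGX token.
         */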
});
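
// A small extra test sketch in the same /*t: style as the read_numb tests above
// (it assumes the same jsdoc-toolkit style test harness and require paths):
/*t:
    requires("../lib/JSDOC/TextStream.js");
    requires("../lib/JSDOC/Token.js");
    requires("../lib/JSDOC/Lang.js");
    
    plan(2, "testing sepIdents");
    
    var tr = new TokenReader({ sepIdents : true });
    var tokens = tr.tokenize(new TextStream("foo.bar = 1;"));
    
    is(tokens[0].data, "foo", "dotted identifier is split - first part is a NAME.");
    is(tokens[1].name, "DOT", "the dot becomes its own PUNC token.");
*/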