JSDOC/TokenReader.js
[gnome.introspection-doc-generator] / JSDOC / TokenReader.js
index 5512783..5e5cec8 100644 (file)
@@ -1,30 +1,38 @@
 //<script type="text/javascript">
 
  
-XObject = imports.XObject.XObject;
-console = imports.console.console;
+const XObject = imports.XObject.XObject;
+const console = imports.console.console;
 
 
-Token   = imports.Token.Token;
-Lang    = imports.Lang.Lang;
+const Token   = imports.Token.Token;
+const Lang    = imports.Lang.Lang;
 
 /**
        @class Search a {@link JSDOC.TextStream} for language tokens.
 */
-TokenReader = XObject.define(
+const TokenReader = XObject.define(
     function(o) {
         
-        this.keepDocs = true;
-        this.keepWhite = false;
-        this.keepComments = false;
-        this.sepIdents = false; // seperate '.' in identifiers..
         XObject.extend(this, o || {});
         
     },
     Object,
     {
+        /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
         collapseWhite : false, // only reduces white space...
-
+        /** @cfg {Boolean} keepDocs keep JSDOC comments **/
+        keepDocs : true,
+        /** @cfg {Boolean} keepWhite keep White space **/
+        keepWhite : false,
+        /** @cfg {Boolean} keepComments  keep all comments **/
+        keepComments : false,
+        /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
+        sepIdents : false,
+        /** @cfg {String} filename name of file being parsed. **/
+        filename : '',
+        /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
+        ignoreBadGrammer : false,
         /**
          * tokenize a stream
          * @return {Array} of tokens
@@ -34,17 +42,17 @@ TokenReader = XObject.define(
          * tr.tokenize(ts)
          * 
          */
-            
-
-
         tokenize : function(/**JSDOC.TextStream*/stream) {
             this.line =1;
             var tokens = [];
-            /**@ignore*/ tokens.last    = function() { return tokens[tokens.length-1]; }
-            /**@ignore*/ tokens.lastSym = function() {
+            /**@ignore*/ 
+            tokens.last    = function() { return tokens[tokens.length-1]; }
+            /**@ignore*/ 
+            tokens.lastSym = function() {
                 for (var i = tokens.length-1; i >= 0; i--) {
                     if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
                 }
+                return true; // NOTE(review): odd sentinel — callers only read .data (undefined on a boxed boolean); returning null would make them throw. Confirm before changing.
             }
 
             while (!stream.look().eof) {
@@ -68,6 +76,57 @@ TokenReader = XObject.define(
             return tokens;
         },
 
+        /**
+         * findPuncToken - find the index of a token (previous to current),
+         * skipping over any nested (..) / {..} groups while scanning back.
+         * 
+         * @arg {Array} tokens the array of tokens.
+         * @arg {String} data the token data to look for (eg. '(')
+         * @arg {Number} n offset to start reading back from (defaults to last token)
+         * @return {Number} position of token, or -1 if not found
+         */
+        findPuncToken : function(tokens, data, n) {
+            n = (n == null) ? tokens.length - 1 : n; // '== null', not '||': a start offset of 0 is legitimate
+            var stack = 0; // depth of nested (..) / {..} currently being skipped
+            while (n > -1) {
+                
+                if (!stack && tokens[n].data == data) {
+                    return n;
+                }
+                
+                if (tokens[n].data  == ')' || tokens[n].data  == '}') {
+                    stack++;
+                    n--;
+                    continue;
+                }
+                if (stack && (tokens[n].data  == '{' || tokens[n].data  == '(')) {
+                    stack--;
+                    n--;
+                    continue;
+                }
+                
+                // ordinary token: keep scanning backwards
+                n--;
+            }
+            return -1;
+        },
+        /**
+         * lastSym - walk backwards from an offset and return the nearest
+         * token that is neither whitespace nor a comment.
+         * 
+         * @arg {Array} tokens the array of tokens.
+         * @arg {Number} n offset to start scanning back from (exclusive).
+         * @return {Token} the token, or null when only WHIT/COMM precede it.
+         */
+        lastSym : function(tokens, n) {
+            for (var idx = n; --idx >= 0; ) {
+                if (!tokens[idx].is("WHIT") && !tokens[idx].is("COMM")) return tokens[idx];
+            }
+            return null;
+        },
+        
+         
+        
         /**
             @returns {Boolean} Was the token found?
          */
@@ -80,29 +139,51 @@ TokenReader = XObject.define(
             if (found === "") {
                 return false;
             }
-            else {
-                var name;
-                if ((name = Lang.keyword(found))) {
-                    tokens.push(new Token(found, "KEYW", name, this.line));
-                    return true;
-                }
-                if (!this.sepIdents || found.indexOf('.') < 0 ) {
-                    tokens.push(new Token(found, "NAME", "NAME", this.line));
-                    return true;
-                }
-                var n = found.split('.');
-                var p = false;
-                var _this = this;
-                n.forEach(function(nm) {
-                    if (p) {
-                        tokens.push(new Token('.', "PUNC", "DOT", _this.line));
+            
+            var name;
+            if ((name = Lang.keyword(found))) {
+                if (found == 'return' && tokens.lastSym().data == ')') {
+                    //Seed.print('@' + tokens.length);
+                    var n = this.findPuncToken(tokens, ')');
+                    //Seed.print(')@' + n);
+                    n = this.findPuncToken(tokens, '(', n-1);
+                    //Seed.print('(@' + n);
+                    // 'return' straight after ')' is only valid after an if/while condition
+                    var lt = this.lastSym(tokens, n);
+                    //print(JSON.stringify(lt));
+                    if (!lt || lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < 0) {
+                        if (!this.ignoreBadGrammer) {
+                            throw {
+                                name : "ArgumentError", 
+                                message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
+                            }
+                        }
                     }
-                    p=true;
-                    tokens.push(new Token(nm, "NAME", "NAME", _this.line));
-                });
-                return true;
+                    
+                    
+                    
+                }
                 
+                tokens.push(new Token(found, "KEYW", name, this.line));
+                return true;
+            }
+            if (!this.sepIdents || found.indexOf('.') < 0 ) {
+                tokens.push(new Token(found, "NAME", "NAME", this.line));
+                return true;
             }
+            var n = found.split('.');
+            var p = false;
+            var _this = this;
+            n.forEach(function(nm) {
+                if (p) {
+                    tokens.push(new Token('.', "PUNC", "DOT", _this.line));
+                }
+                p=true;
+                tokens.push(new Token(nm, "NAME", "NAME", _this.line));
+            });
+            return true;
+                
+
         },
 
         /**
@@ -115,13 +196,28 @@ TokenReader = XObject.define(
                 found += stream.next();
             }
             
+            
             if (found === "") {
                 return false;
             }
-            else {
-                tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
-                return true;
+            
+            if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
+                //print("Error - comma found before " + found);
+                //print(JSON.stringify(tokens.lastSym(), null,4));
+                if (this.ignoreBadGrammer) {
+                    print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
+                } else {
+                    
+                    throw {
+                        name : "ArgumentError", 
+                        message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
+                    }
+                }
             }
+            
+            tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
+            return true;
+            
         },
 
         /**
@@ -130,18 +226,18 @@ TokenReader = XObject.define(
         read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
             var found = "";
             
-            while (!stream.look().eof && Lang.isSpace(stream.look())) {
+            while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) { // stop at newlines: read_newline owns them so this.line stays accurate
                 found += stream.next();
             }
             
             if (found === "") {
                 return false;
             }
-            else {
-                if (this.collapseWhite) found = " ";
-                if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
-                return true;
-            }
+            //print("WHITE = " + JSON.stringify(found)); 
+            if (this.collapseWhite) found = " ";
+            if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+            return true;
+        
         },
 
         /**
@@ -149,7 +245,7 @@ TokenReader = XObject.define(
          */
         read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
             var found = "";
-            
+            var line = this.line; // remember where the newline run started; this.line advances below
             while (!stream.look().eof && Lang.isNewline(stream.look())) {
                 this.line++;
                 found += stream.next();
@@ -158,20 +254,19 @@
             if (found === "") {
                 return false;
             }
-            else {
-                if (this.collapseWhite) {
-                    found = "\n";
-                }
-                if (this.keepWhite) {
-                    var last = tokens.pop();
-                    if (last.name != "WHIT") {
-                        tokens.push(last);
-                    }
-                    
-                    tokens.push(new Token(found, "WHIT", "NEWLINE", this.line));
+            //this.line++;
+            if (this.collapseWhite) {
+                found = "\n";
+            }
+             if (this.keepWhite) {
+                var last = tokens ? tokens.pop() : false; // peek at the previous token (false when tokens is empty)
+                if (last && last.name != "WHIT") { // keep it unless it is whitespace: a trailing SPACE is collapsed into this NEWLINE
                     tokens.push(last);
                 }
-                return true;
+                
+                tokens.push(new Token(found, "WHIT", "NEWLINE", line)); // tag with the run's starting line, not the post-increment one
             }
+            return true;
         },
 
         /**
@@ -181,6 +276,7 @@ TokenReader = XObject.define(
             if (stream.look() == "/" && stream.look(1) == "*") {
                 var found = stream.next(2);
                 var c = '';
+                var line = this.line;
                 while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
                     c = stream.next();
                     if (c == "\n") this.line++;
@@ -189,7 +285,7 @@ TokenReader = XObject.define(
                 
                 // to start doclet we allow /** or /*** but not /**/ or /****
                 if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
-                else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", this.line));
+                else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
                 return true;
             }
             return false;
@@ -205,13 +301,15 @@ TokenReader = XObject.define(
                 || 
                 (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
             ) {
-                
+                var line = this.line;
                 while (!stream.look().eof && !Lang.isNewline(stream.look())) {
                     found += stream.next();
                 }
-                
+                if (!stream.look().eof) {
+                    found += stream.next();
+                }
                 if (this.keepComments) {
-                    tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", this.line));
+                    tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
                 }
                 this.line++;
                 return true;
@@ -366,12 +464,25 @@ TokenReader = XObject.define(
                 )
             ) {
                 var regex = stream.next();
-                
+                var in_brace = false; // true while inside a [...] character class, where '/' is a literal
                 while (!stream.look().eof) {
+                    
+                    if (!in_brace && stream.look() == "[") { // character class opens
+                        in_brace = true;
+                        regex += stream.next();
+                        continue;
+                    }
+                    if (in_brace && stream.look() == "]") { // character class closes
+                        in_brace = false;
+                        regex += stream.next();
+                        continue;
+                    }
                     if (stream.look() == "\\") { // escape sequence
                         regex += stream.next(2);
+                        continue;
                     }
-                    else if (stream.look() == "/") {
+                    
+                    if (!in_brace && stream.look() == "/") {
                         regex += stream.next();
                         
                         while (/[gmi]/.test(stream.look())) {
@@ -381,9 +492,9 @@ TokenReader = XObject.define(
                         tokens.push(new Token(regex, "REGX", "REGX", this.line));
                         return true;
                     }
-                    else {
-                        regex += stream.next();
-                    }
+                    
+                    regex += stream.next();
+                    
                 }
                 // error: unterminated regex
             }