JSDOC/TokenReader.vala
index 8bf3716..9f2c12d 100644
@@ -1,37 +1,37 @@
 //<script type="text/javascript">
 
  
-
-
+// test code
 //const Token   = imports.Token.Token;
 //const Lang    = imports.Lang.Lang;
 
 /**
        @class Search a {@link JSDOC.TextStream} for language tokens.
 */
-
 namespace JSDOC {
 
     public class TokenArray: Object {
         
         public Gee.ArrayList<Token> tokens;
         public int length {
-            get { return this.tokens.size }
+            get { return this.tokens.size; }
         }
         
         public TokenArray()
         {
-            this.items = new Gee.ArrayList<Token>();
+            this.tokens = new Gee.ArrayList<Token>();
         }
         
         public Token? last() {
-            if (this.tokens > 0) {
-                return this.tokens[this.tokens.length-1];
+            if (this.tokens.size > 0) {
+                return this.tokens.get(this.tokens.size-1);
             }
             return null;
         }
         public Token? lastSym () {
-            for (var i = this.tokens.length-1; i >= 0; i--) {
+            for (var i = this.tokens.size-1; i >= 0; i--) {
                 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
                     return this.tokens.get(i);
                 }
@@ -41,11 +41,30 @@ namespace JSDOC {
         public void push (Token t) {
             this.tokens.add(t);
         }
-        public Token get(int i) {
+        public Token? pop ()
+        {
+            if (this.tokens.size > 0) {
+                return this.tokens.remove_at(this.tokens.size-1);
+            }
+            return null;
+        }
+        
+        public new Token get(int i) {
             return this.tokens.get(i);
         }
+        public void dump()
+        {
+            foreach (var token in this.tokens) {
+                print(token.asString() + "\n");
+            }
+        }
+        
     }
 
+    public errordomain TokenReader_Error {
+        ArgumentError
+    }
+    
 
     public class TokenReader : Object
     {
@@ -58,19 +77,19 @@ namespace JSDOC {
          */
         
         /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
-        public bool collapseWhite = false, // only reduces white space...
+        public bool collapseWhite = false; // only reduces white space...
         /** @cfg {Boolean} keepDocs keep JSDOC comments **/
-        public bool keepDocs = true,
+        public bool keepDocs = true;
         /** @cfg {Boolean} keepWhite keep White space **/
-        public bool keepWhite = false,
+        public bool keepWhite = false;
         /** @cfg {Boolean} keepComments  keep all comments **/
-        public bool keepComments = false,
+        public bool keepComments = false;
         /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
-        public bool sepIdents = false,
+        public bool sepIdents = false;
         /** @cfg {String} filename name of file being parsed. **/
         public string filename = "";
         /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
-        public bool ignoreBadGrammer = false,
+        public bool ignoreBadGrammer = false;
         
         
         int line = 0;
@@ -89,13 +108,10 @@ namespace JSDOC {
             this.line =1;
             var tokens = new TokenArray();
            
-            bool eof;
-            while (true) {
+         
+            while (!stream.lookEOF()) {
                 
-                stream.look(0, out eof) 
-                if (eof) {
-                    break;
-                }
+
                 if (this.read_mlcomment(stream, tokens)) continue;
                 if (this.read_slcomment(stream, tokens)) continue;
                 if (this.read_dbquote(stream, tokens))   continue;
@@ -116,7 +132,7 @@ namespace JSDOC {
             
             
             return tokens;
-        },
+        }
 
         /**
          * findPuncToken - find the id of a token (previous to current)
@@ -127,21 +143,22 @@ namespace JSDOC {
          * @arg {Number} offset where to start reading from
          * @return {Number} position of token
          */
-        public int findPuncToken(TokenArray tokens, string data, int n) {
-            n = n || tokens.length -1;
+        public int findPuncToken(TokenArray tokens, string data, int n)
+        {
+            n = n > 0 ? n :  tokens.length -1;
             var stack = 0;
             while (n > -1) {
                 
-                if (!stack && tokens.get(n).data == data) {
+                if (stack < 1 && tokens.get(n).data == data) {
                     return n;
                 }
                 
-                if (tokens.get(n).data  == ')' || tokens.get(n).data  == '}') {
+                if (tokens.get(n).data  == ")" || tokens.get(n).data  == "}") {
                     stack++;
                     n--;
                     continue;
                 }
-                if (stack && (tokens.get(n).data  == '{' || tokens.get(n).data  == '(')) {
+                if (stack > 0 && (tokens.get(n).data  == "{" || tokens.get(n).data  == "(")) {
                     stack--;
                     n--;
                     continue;
@@ -151,7 +168,7 @@ namespace JSDOC {
                 n--;
             }
             return -1;
-        },
+        }
         /**
          * lastSym - find the last token symbol
          * need to back check syntax..
@@ -160,194 +177,239 @@ namespace JSDOC {
          * @arg {Number} offset where to start..
          * @return {Token} the token
          */
-        public Token lastSym(TokenArray tokens, int n) {
+        public Token? lastSym(TokenArray tokens, int n)
+        {
             for (var i = n-1; i >= 0; i--) {
-                if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
+                if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
+                    return tokens.get(i);
+                }
             }
             return null;
-        },
+        }
         
          
         
         /**
             @returns {Boolean} Was the token found?
          */
-        read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
-            var found = "";
-            while (!stream.look().eof && Lang.isWordChar(stream.look())) {
+        public bool read_word (TextStream stream, TokenArray tokens)
+        {
+            string found = "";
+            while (!stream.lookEOF() && Lang.isWordChar(stream.look().to_string())) {
                 found += stream.next();
             }
             
-            if (found === "") {
+            if (found == "") {
                 return false;
             }
             
-            var name;
-            if ((name = Lang.keyword(found))) {
-                if (found == 'return' && tokens.lastSym().data == ')') {
+            var name = Lang.keyword(found);
+            if (name != null) {
+                
+                // check for "return" directly after ")" - unless the ")" closes an if/while condition this is bad grammar
+                var ls = tokens.lastSym();
+                if (found == "return" && ls != null && ls.data == ")") {
                     //Seed.print('@' + tokens.length);
-                    var n = this.findPuncToken(tokens, ')');
+                    var n = this.findPuncToken(tokens, ")", 0);
                     //Seed.print(')@' + n);
-                    n = this.findPuncToken(tokens, '(', n-1);
+                    n = this.findPuncToken(tokens, "(", n-1);
                     //Seed.print('(@' + n);
                     
-                    var lt = this.lastSym(tokens, n);
-                    print(JSON.stringify(lt));
-                    if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
+                    //var lt = this.lastSym(tokens, n);
+                    /*
+                    //print(JSON.stringify(lt));
+                    if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
                         if (!this.ignoreBadGrammer) {
-                            throw {
-                                name : "ArgumentError", 
-                                message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
-                            }
+                            throw new TokenReader_Error.ArgumentError(
+                                this.filename + ":" + this.line + " Error - return found after )"
+                            );
                         }
                     }
                     
-                    
+                    */
                     
                 }
                 
                 tokens.push(new Token(found, "KEYW", name, this.line));
                 return true;
             }
-            if (!this.sepIdents || found.indexOf('.') < 0 ) {
+            
+            if (!this.sepIdents || found.index_of(".") < 0 ) {
                 tokens.push(new Token(found, "NAME", "NAME", this.line));
                 return true;
             }
-            var n = found.split('.');
+            var n = found.split(".");
             var p = false;
-            var _this = this;
-            n.forEach(function(nm) {
+            foreach (unowned string nm in n) {
                 if (p) {
-                    tokens.push(new Token('.', "PUNC", "DOT", _this.line));
+                    tokens.push(new Token(".", "PUNC", "DOT", this.line));
                 }
                 p=true;
-                tokens.push(new Token(nm, "NAME", "NAME", _this.line));
-            });
+                tokens.push(new Token(nm, "NAME", "NAME", this.line));
+            }
             return true;
                 
 
-        },
+        }
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
-            var found = "";
-            var name;
-            while (!stream.look().eof && Lang.punc(found+stream.look())) {
+        public bool read_punc (TextStream stream, TokenArray tokens) throws TokenReader_Error
+        {
+            string found = "";
+            
+            while (!stream.lookEOF()) {
+                var ns = stream.look().to_string();
+
+                if (null == Lang.punc(found + ns)) {
+                    break;
+                }
                 found += stream.next();
             }
             
             
-            if (found === "") {
+            if (found == "") {
                 return false;
             }
             
-            if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
+            var ls = tokens.lastSym();
+            
+            if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
                 //print("Error - comma found before " + found);
                 //print(JSON.stringify(tokens.lastSym(), null,4));
                 if (this.ignoreBadGrammer) {
-                    print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
+                    print("\n" + this.filename + ":" + this.line.to_string() + " Error - comma found before " + found);
                 } else {
-                    
-                    throw {
-                        name : "ArgumentError", 
-                        message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
-                    }
+                    throw new TokenReader_Error.ArgumentError(
+                        this.filename + ":" + this.line.to_string() + " Error - comma found before " + found
+                    );
                 }
             }
             
             tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
             return true;
             
-        },
+        } 
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
+        public bool read_space  (TextStream stream, TokenArray tokens)
+        {
             var found = "";
             
-            while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
+            while (!stream.lookEOF() && Lang.isSpaceC(  stream.look()) && !Lang.isNewlineC(stream.look())) {
                 found += stream.next();
             }
             
-            if (found === "") {
+            if (found == "") {
                 return false;
             }
-            //print("WHITE = " + JSON.stringify(found)); 
-            if (this.collapseWhite) found = " ";
-            if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+            //print("WHITE = " + JSON.stringify(found));
+            
+             
+            if (this.collapseWhite) {
+                found = " "; // this might work better if it was a '\n' ???
+            }
+            if (this.keepWhite) {
+                tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+            }
             return true;
         
-        },
+        }
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
+        public bool read_newline  (TextStream stream, TokenArray tokens)
+        {
             var found = "";
             var line = this.line;
-            while (!stream.look().eof && Lang.isNewline(stream.look())) {
+            while (!stream.lookEOF() && Lang.isNewlineC(stream.look())) {
                 this.line++;
                 found += stream.next();
             }
             
-            if (found === "") {
+            if (found == "") {
                 return false;
             }
+            
+            // if we found a newline, we could check whether the previous character was a ';' - if so the newline could be dropped.
+            // otherwise it is generally kept, which should reduce our issues with stripping newlines..
+           
+            
             //this.line++;
             if (this.collapseWhite) {
-                found = "\n";
+                found = "\n"; // reduces multiple line breaks into a single one...
             }
-             if (this.keepWhite) {
-                var last = tokens ? tokens.pop() : false;
-                if (last && last.name != "WHIT") {
+            
+            if (this.keepWhite) {
+                var last = tokens.pop();
+                if (last != null && last.name != "WHIT") {
                     tokens.push(last);
                 }
-                
+                // replaces last new line... 
                 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
             }
             return true;
-        },
+        }
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
-            if (stream.look() == "/" && stream.look(1) == "*") {
-                var found = stream.next(2);
-                var c = '';
-                var line = this.line;
-                while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
-                    c = stream.next();
-                    if (c == "\n") this.line++;
-                    found += c;
+        public bool read_mlcomment  (TextStream stream, TokenArray tokens)
+        {
+            if (stream.look() != '/') {
+                return false;
+            }
+            if (stream.look(1) != '*') {
+                return false;
+            }
+            var found = stream.next(2);
+            string  c = "";
+            var line = this.line;
+            while (!stream.lookEOF() && !(stream.look(-1) == '/' && stream.look(-2) == '*')) {
+                c = stream.next();
+                if (c == "\n") {
+                    this.line++;
                 }
-                
-                // to start doclet we allow /** or /*** but not /**/ or /****
-                if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
-                else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
-                return true;
+                found += c;
             }
-            return false;
-        },
+            
+            // to start doclet we allow /** or /*** but not /**/ or /****
+            //if (found.length /^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) {
+            if (this.keepDocs && found.length > 4 && found.index_of("/**") == 0 && found[3] != '/') {
+                tokens.push(new Token(found, "COMM", "JSDOC", this.line));
+            } else if (this.keepComments) {
+                tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
+            }
+            return true;
+        
+        } 
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
-            var found;
+         public bool read_slcomment  (TextStream stream, TokenArray tokens)
+         {
+            var found = "";
             if (
-                (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
+                (stream.look() == '/' && stream.look(1) == '/' && (""!=(found=stream.next(2))))
                 || 
-                (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
+                (stream.look() == '<' && stream.look(1) == '!' && stream.look(2) == '-' && stream.look(3) == '-' && (""!=(found=stream.next(4))))
             ) {
                 var line = this.line;
-                while (!stream.look().eof && !Lang.isNewline(stream.look())) {
+                while (!stream.lookEOF()) {
+                    //print(stream.look().to_string());
+                    if (Lang.isNewline(stream.look().to_string())) {
+                        break;
+                    }
                     found += stream.next();
                 }
-                if (!stream.look().eof) {
+                if (!stream.lookEOF()) { // not EOF, so we stopped on a newline - consume it as part of the comment..
                     found += stream.next();
                 }
                 if (this.keepComments) {
@@ -357,176 +419,162 @@ namespace JSDOC {
                 return true;
             }
             return false;
-        },
+        }
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
-            if (stream.look() == "\"") {
+        public bool read_dbquote  (TextStream stream, TokenArray tokens)
+        {
+            if (stream.look() != '"') {
+                return false;
+            }
                 // find terminator
-                var string = stream.next();
-                
-                while (!stream.look().eof) {
-                    if (stream.look() == "\\") {
-                        if (Lang.isNewline(stream.look(1))) {
-                            do {
-                                stream.next();
-                            } while (!stream.look().eof && Lang.isNewline(stream.look()));
-                            string += "\\\n";
-                        }
-                        else {
-                            string += stream.next(2);
-                        }
-                    }
-                    else if (stream.look() == "\"") {
-                        string += stream.next();
-                        tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
-                        return true;
+            var str = stream.next();
+            
+            while (!stream.lookEOF()) {
+                if (stream.look() == '\\') {
+                    if (Lang.isNewline(stream.look(1).to_string())) {
+                        do {
+                            stream.next();
+                        } while (!stream.lookEOF() && Lang.isNewline(stream.look().to_string()));
+                        str += "\\\n";
                     }
                     else {
-                        string += stream.next();
+                        str += stream.next(2);
                     }
+                    continue;
+                }
+                if (stream.look() == '"') {
+                    str += stream.next();
+                    tokens.push(new Token(str, "STRN", "DOUBLE_QUOTE", this.line));
+                    return true;
                 }
+            
+                str += stream.next();
+                
             }
-            return false; // error! unterminated string
-        },
+            return false;
+        }
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
-            if (stream.look() == "'") {
-                // find terminator
-                var string = stream.next();
-                
-                while (!stream.look().eof) {
-                    if (stream.look() == "\\") { // escape sequence
-                        string += stream.next(2);
-                    }
-                    else if (stream.look() == "'") {
-                        string += stream.next();
-                        tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
-                        return true;
-                    }
-                    else {
-                        string += stream.next();
-                    }
+        public bool read_snquote  (TextStream stream, TokenArray tokens)
+        {
+            if (stream.look() != '\'') {
+                return false;
+            }
+            // find terminator
+            var str = stream.next();
+            
+            while (!stream.lookEOF()) {
+                if (stream.look() == '\\') { // escape sequence
+                    str += stream.next(2);
+                    continue;
                 }
+                if (stream.look() == '\'') {
+                    str += stream.next();
+                    tokens.push(new Token(str, "STRN", "SINGLE_QUOTE", this.line));
+                    return true;
+                }
+                str += stream.next();
+                
             }
-            return false; // error! unterminated string
-        },
+            return false;
+        }
+        
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
-            if (stream.look() === "0" && stream.look(1) == "x") {
+        public bool read_numb  (TextStream stream, TokenArray tokens)
+        {
+            if (stream.look() == '0' && stream.look(1) == 'x') {
                 return this.read_hex(stream, tokens);
             }
             
             var found = "";
             
-            while (!stream.look().eof && Lang.isNumber(found+stream.look())){
+            while (!stream.lookEOF() && Lang.isNumber(found+stream.look().to_string())){
                 found += stream.next();
             }
             
-            if (found === "") {
+            if (found == "") {
                 return false;
             }
-            else {
-                if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
-                else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
+            if (GLib.Regex.match_simple("^0[0-7]", found)) {
+                tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
                 return true;
             }
-        },
-        /*t:
-            requires("../lib/JSDOC/TextStream.js");
-            requires("../lib/JSDOC/Token.js");
-            requires("../lib/JSDOC/Lang.js");
-            
-            plan(3, "testing read_numb");
-            
-            //// setup
-            var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
-            var tr = new TokenReader();
-            var tokens = tr.tokenize(new TextStream(src));
-            
-            var hexToken, octToken, decToken;
-            for (var i = 0; i < tokens.length; i++) {
-                if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
-                if (tokens[i].name == "OCTAL") octToken = tokens[i];
-                if (tokens[i].name == "DECIMAL") decToken = tokens[i];
-            }
-            ////
-            
-            is(decToken.data, "8.0", "decimal number is found in source.");
-            is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
-            is(octToken.data, "0777", "octal number is found in source.");
-        */
-
+            tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
+            return true;
+        
+        }
+       
         /**
             @returns {Boolean} Was the token found?
          */
-        read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
+        public bool read_hex  (TextStream stream, TokenArray tokens)
+        {
             var found = stream.next(2);
             
-            while (!stream.look().eof) {
-                if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
+            while (!stream.lookEOF()) {
+                if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look().to_string())) { // done
                     tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
                     return true;
                 }
-                else {
-                    found += stream.next();
-                }
+                
+                found += stream.next();
+               
             }
             return false;
-        },
+        }
 
         /**
             @returns {Boolean} Was the token found?
          */
-        read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
-            var last;
+        public bool read_regx (TextStream stream, TokenArray tokens)
+        {
+              
+            if (stream.look() != '/') {
+                return false;
+            }
+            var  last = tokens.lastSym();
             if (
-                stream.look() == "/"
-                && 
+                (last == null)
+                || 
                 (
-                    
-                    (
-                        !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
-                        || 
-                        (
-                               !last.is("NUMB")
-                            && !last.is("NAME")
-                            && !last.is("RIGHT_PAREN")
-                            && !last.is("RIGHT_BRACKET")
-                        )
-                    )
+                       !last.is("NUMB")   // stuff that can not appear before a regex..
+                    && !last.is("NAME")
+                    && !last.is("RIGHT_PAREN")
+                    && !last.is("RIGHT_BRACKET")
                 )
-            ) {
+            )  {
                 var regex = stream.next();
                 
-                while (!stream.look().eof) {
-                    if (stream.look() == "\\") { // escape sequence
+                while (!stream.lookEOF()) {
+                    if (stream.look() == '\\') { // escape sequence
                         regex += stream.next(2);
+                        continue;
                     }
-                    else if (stream.look() == "/") {
+                    if (stream.look() == '/') {
                         regex += stream.next();
                         
-                        while (/[gmi]/.test(stream.look())) {
+                        while (GLib.Regex.match_simple("[gmi]", stream.look().to_string())) {
                             regex += stream.next();
                         }
                         
                         tokens.push(new Token(regex, "REGX", "REGX", this.line));
                         return true;
                     }
-                    else {
-                        regex += stream.next();
-                    }
+                     
+                    regex += stream.next();
+                     
                 }
                 // error: unterminated regex
             }
             return false;
         }
-});
\ No newline at end of file
+    }
+}
\ No newline at end of file
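
A minimal sketch of driving the ported class (not part of the commit above). It mirrors the JS test plan that this commit removes from read_numb(); the TextStream(string) constructor and tokenize() propagating TokenReader_Error are assumptions based on the surrounding code, not confirmed by the hunks shown here.

// sketch only - assumes TextStream takes the source text and tokenize() can throw TokenReader_Error
void main () {
    var src = "function foo(num){ while (num + 8.0 >= 0x20 && num < 0777) {} }";

    var tr = new JSDOC.TokenReader();
    tr.keepWhite = true;      // keep SPACE/NEWLINE tokens
    tr.keepComments = true;   // keep single- and multi-line comments
    tr.sepIdents = true;      // split a.b.c into NAME, DOT, NAME tokens

    try {
        var tokens = tr.tokenize(new JSDOC.TextStream(src));
        tokens.dump();        // prints each token via Token.asString()
    } catch (JSDOC.TokenReader_Error e) {
        print("%s\n", e.message);
    }
}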