JSDOC/TokenReader.vala
[gnome.introspection-doc-generator] / JSDOC / TokenReader.vala
index e69de29..c87f4ac 100644 (file)
@@ -0,0 +1,564 @@
+//<script type="text/javascript">
+
+
+
+//const Token   = imports.Token.Token;
+//const Lang    = imports.Lang.Lang;
+
+/**
+       @class Search a {@link JSDOC.TextStream} for language tokens.
+*/
+
+namespace JSDOC {
+
+    public class TokenArray: Object {
+        
+        public Gee.ArrayList<Token> tokens;
+        public int length {
+            get { return this.tokens.size; }
+        }
+        
+        public TokenArray()
+        {
+            this.items = new Gee.ArrayList<Token>();
+        }
+        
+        public Token? last() {
+            if (this.tokens > 0) {
+                return this.tokens[this.tokens.length-1];
+            }
+            return null;
+        }
+        public Token? lastSym () {
+            for (var i = this.tokens.length-1; i >= 0; i--) {
+                if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM")))  {
+                    return this.tokens.get(i);
+                }
+            }
+            return null;
+        }
+        public void push (Token t) {
+            this.tokens.add(t);
+        }
+        public Token? pop ()
+        {
+            if (this.size > 0) {
+                return this.tokens.remove_at(this.size-1);
+            }
+            return null;
+        }
+        
+        public Token get(int i) {
+            return this.tokens.get(i);
+        }
+    }
+
    // Error domain raised by TokenReader when the input contains grammar
    // that could break a downstream compressor (see ignoreBadGrammer).
    errordomain TokenReader_Error {
            ArgumentError
    }
+    
+
    /**
     * Scans a {@link TextStream} and splits it into language tokens
     * (keywords, names, strings, numbers, comments, punctuation).
     * Behaviour is controlled by the boolean config flags below.
     */
    public class TokenReader : Object
    {
        
        
        
        /*
         *
         * I wonder if this will accept the prop: value, prop2 :value constructor if we do not define one...
         */
        
        /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
        public bool collapseWhite = false; // only reduces white space...
        /** @cfg {Boolean} keepDocs keep JSDOC comments **/
        public bool keepDocs = true;
        /** @cfg {Boolean} keepWhite keep White space **/
        public bool keepWhite = false;
        /** @cfg {Boolean} keepComments  keep all comments **/
        public bool keepComments = false;
        /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
        public bool sepIdents = false;
        /** @cfg {String} filename name of file being parsed. **/
        public string filename = "";
        /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
        public bool ignoreBadGrammer = false;
        
        
        // current 1-based line number; maintained by read_newline /
        // read_mlcomment / read_slcomment as they consume line breaks
        int line = 0;
+        
+        /**
+         * tokenize a stream
+         * @return {Array} of tokens
+         * 
+         * ts = new TextStream(File.read(str));
+         * tr = TokenReader({ keepComments : true, keepWhite : true });
+         * tr.tokenize(ts)
+         * 
+         */
+        public TokenArray tokenize(TextStream stream)
+        {
+            this.line =1;
+            var tokens = new TokenArray();
+           
+            bool eof;
+            while (!stream.lookEOF()) {
+                
+                
+                if (this.read_mlcomment(stream, tokens)) continue;
+                if (this.read_slcomment(stream, tokens)) continue;
+                if (this.read_dbquote(stream, tokens))   continue;
+                if (this.read_snquote(stream, tokens))   continue;
+                if (this.read_regx(stream, tokens))      continue;
+                if (this.read_numb(stream, tokens))      continue;
+                if (this.read_punc(stream, tokens))      continue;
+                if (this.read_newline(stream, tokens))   continue;
+                if (this.read_space(stream, tokens))     continue;
+                if (this.read_word(stream, tokens))      continue;
+                
+                // if execution reaches here then an error has happened
+                tokens.push(
+                        new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
+                );
+            }
+            
+            
+            
+            return tokens;
+        }
+
+        /**
+         * findPuncToken - find the id of a token (previous to current)
+         * need to back check syntax..
+         * 
+         * @arg {Array} tokens the array of tokens.
+         * @arg {String} token data (eg. '(')
+         * @arg {Number} offset where to start reading from
+         * @return {Number} position of token
+         */
+        public int findPuncToken(TokenArray tokens, string data, int n)
+        {
+            n = n || tokens.length -1;
+            var stack = 0;
+            while (n > -1) {
+                
+                if (!stack && tokens.get(n).data == data) {
+                    return n;
+                }
+                
+                if (tokens.get(n).data  == ')' || tokens.get(n).data  == '}') {
+                    stack++;
+                    n--;
+                    continue;
+                }
+                if (stack && (tokens.get(n).data  == '{' || tokens.get(n).data  == '(')) {
+                    stack--;
+                    n--;
+                    continue;
+                }
+                
+                
+                n--;
+            }
+            return -1;
+        }
+        /**
+         * lastSym - find the last token symbol
+         * need to back check syntax..
+         * 
+         * @arg {Array} tokens the array of tokens.
+         * @arg {Number} offset where to start..
+         * @return {Token} the token
+         */
+        public Token lastSym(TokenArray tokens, int n)
+        {
+            for (var i = n-1; i >= 0; i--) {
+                if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
+                    return tokens.get(i);
+                }
+            }
+            return null;
+        }
+        
+         
+        
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_word (TokenStream stream, TokenArray tokens)
+        {
+            string found = "";
+            while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
+                found += stream.next();
+            }
+            
+            if (found == "") {
+                return false;
+            }
+            
+            var name = Lang.keyword(found);
+            if (name != null) {
+                
+                // look for "()return" ?? why ???
+                var ls = tokens.lastSym();
+                if (found == "return" && ls != null && ls.data == ")") {
+                    //Seed.print('@' + tokens.length);
+                    var n = this.findPuncToken(tokens, ")");
+                    //Seed.print(')@' + n);
+                    n = this.findPuncToken(tokens, "(", n-1);
+                    //Seed.print('(@' + n);
+                    
+                    var lt = this.lastSym(tokens, n);
+                    /*
+                    //print(JSON.stringify(lt));
+                    if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
+                        if (!this.ignoreBadGrammer) {
+                            throw new TokenReader_Error.ArgumentError(
+                                this.filename + ":" + this.line + " Error - return found after )"
+                            );
+                        }
+                    }
+                    
+                    */
+                    
+                }
+                
+                tokens.push(new Token(found, "KEYW", name, this.line));
+                return true;
+            }
+            
+            if (!this.sepIdents || found.indexOf('.') < 0 ) {
+                tokens.push(new Token(found, "NAME", "NAME", this.line));
+                return true;
+            }
+            var n = found.split('.');
+            var p = false;
+            foreach (unowned string nm in n) {
+                if (p) {
+                    tokens.push(new Token('.', "PUNC", "DOT", this.line));
+                }
+                p=true;
+                tokens.push(new Token(nm, "NAME", "NAME", this.line));
+            }
+            return true;
+                
+
+        }
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_punc (TokenStream stream, TokenArray tokens)
+        {
+            string found = "";
+            var name;
+            while (!stream.lookEOF() && Lang.punc(found + stream.look()).length > 0) {
+                found += stream.next();
+            }
+            
+            
+            if (found == "") {
+                return false;
+            }
+            
+            var ls = tokens.lastSym();
+            
+            if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
+                //print("Error - comma found before " + found);
+                //print(JSON.stringify(tokens.lastSym(), null,4));
+                if (this.ignoreBadGrammer) {
+                    print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
+                } else {
+                    throw new TokenReader_Error.ArgumentError(
+                                this.filename + ":" + this.line + "  comma found before " + found
+                  
+                    );
+                     
+                }
+            }
+            
+            tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
+            return true;
+            
+        } 
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_space  (TokenStream stream, TokenArray tokens)
+        {
+            var found = "";
+            
+            while (!stream.lookEOF() && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
+                found += stream.next();
+            }
+            
+            if (found == "") {
+                return false;
+            }
+            //print("WHITE = " + JSON.stringify(found));
+            
+             
+            if (this.collapseWhite) {
+                found = " "; // this might work better if it was a '\n' ???
+            }
+            if (this.keepWhite) {
+                tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+            }
+            return true;
+        
+        }
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_newline  (TokenStream stream, TokenArray tokens)
+        {
+            var found = "";
+            var line = this.line;
+            while (!stream.lookEOF() && Lang.isNewline(stream.look())) {
+                this.line++;
+                found += stream.next();
+            }
+            
+            if (found == "") {
+                return false;
+            }
+            
+            // if we found a new line, then we could check if previous character was a ';' - if so we can drop it.
+            // otherwise generally keep it.. in which case it should reduce our issue with stripping new lines..
+           
+            
+            //this.line++;
+            if (this.collapseWhite) {
+                found = "\n"; // reduces multiple line breaks into a single one...
+            }
+            
+            if (this.keepWhite) {
+                var last = tokens.pop();
+                if (last != null && last.name != "WHIT") {
+                    tokens.push(last);
+                }
+                // replaces last new line... 
+                tokens.push(new Token(found, "WHIT", "NEWLINE", line));
+            }
+            return true;
+        },
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_mlcomment  (TokenStream stream, TokenArray tokens)
+        {
+            if (stream.look() != "/") {
+                return false;
+            }
+            if (stream.look(1) != "*") {
+                return false;
+            }
+            var found = stream.next(2);
+            var c = '';
+            var line = this.line;
+            while (!stream.lookEOF() && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
+                c = stream.next();
+                if (c == "\n") {
+                    this.line++;
+                }
+                found += c;
+            }
+            
+            // to start doclet we allow /** or /*** but not /**/ or /****
+            //if (found.length /^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) {
+            if ((this.keepDocs && found.length > 4 && found.index_of("/**") == 0 && found[3] != "/") {
+                tokens.push(new Token(found, "COMM", "JSDOC", this.line));
+            } else if (this.keepComments) {
+                tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
+            }
+            return true;
+        
+        } 
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+         public bool read_slcomment  (TokenStream stream, TokenArray tokens)
+         {
+            var found = "";
+            if (
+                (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
+                || 
+                (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
+            ) {
+                var line = this.line;
+                while (!stream.lookEOF() && !Lang.isNewline(stream.look())) {
+                    found += stream.next();
+                }
+                //if (!stream.lookEOF()) { // what? << eat the EOL?
+                    found += stream.next();
+                //}
+                if (this.keepComments) {
+                    tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
+                }
+                this.line++;
+                return true;
+            }
+            return false;
+        }
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_dbquote  (TokenStream stream, TokenArray tokens)
+        {
+            if (stream.look() != "\"") {
+                return false;
+            }
+                // find terminator
+            var str = stream.next();
+            
+            while (!stream.lookEOF()) {
+                if (stream.look() == "\\") {
+                    if (Lang.isNewline(stream.look(1))) {
+                        do {
+                            stream.next();
+                        } while (!stream.lookEOF() && Lang.isNewline(stream.look()));
+                        str += "\\\n";
+                    }
+                    else {
+                        str += stream.next(2);
+                    }
+                    continue;
+                }
+                if (stream.look() == "\"") {
+                    str += stream.next();
+                    tokens.push(new Token(str, "STRN", "DOUBLE_QUOTE", this.line));
+                    return true;
+                }
+            
+                str += stream.next();
+                
+            }
+            return false;
+        },
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_snquote  (TokenStream stream, TokenArray tokens)
+        {
+            if (stream.look() != "'") {
+                return false;
+            }
+            // find terminator
+            var str = stream.next();
+            
+            while (!stream.look().eof) {
+                if (stream.look() == "\\") { // escape sequence
+                    str += stream.next(2);
+                    continue;
+                }
+                if (stream.look() == "'") {
+                    str += stream.next();
+                    tokens.push(new Token(str, "STRN", "SINGLE_QUOTE", this.line));
+                    return true;
+                }
+                str += stream.next();
+                
+            }
+            return false;
+        }
+        
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_numb  (TokenStream stream, TokenArray tokens)
+        {
+            if (stream.look() === "0" && stream.look(1) == "x") {
+                return this.read_hex(stream, tokens);
+            }
+            
+            var found = "";
+            
+            while (!stream.lookEOF() && Lang.isNumber(found+stream.look())){
+                found += stream.next();
+            }
+            
+            if (found === "") {
+                return false;
+            }
+            if (GLib.Regex.match_simple("^0[0-7]", found)) {
+                tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
+                return true;
+            }
+            tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
+            return true;
+        
+        }
+       
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_hex  (TokenStream stream, TokenArray tokens)
+        {
+            var found = stream.next(2);
+            
+            while (!stream.lookEOF()) {
+                if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
+                    tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
+                    return true;
+                }
+                
+                found += stream.next();
+               
+            }
+            return false;
+        },
+
+        /**
+            @returns {Boolean} Was the token found?
+         */
+        public bool read_regx (TokenStream stream, TokenArray tokens)
+        {
+            Token last;
+            if (stream.look() != "/") {
+                return false;
+            }
+            var last = tokens.lastSym();
+            if (
+                (last == null)
+                || 
+                (
+                       !last.is("NUMB")   // stuff that can not appear before a regex..
+                    && !last.is("NAME")
+                    && !last.is("RIGHT_PAREN")
+                    && !last.is("RIGHT_BRACKET")
+                )
+            )  {
+                var regex = stream.next();
+                
+                while (!stream.lookEOF()) {
+                    if (stream.look() == "\\") { // escape sequence
+                        regex += stream.next(2);
+                        continue;
+                    }
+                    if (stream.look() == "/") {
+                        regex += stream.next();
+                        
+                        while (GLib.Regex.match_simple("[gmi]", stream.look()) {
+                            regex += stream.next();
+                        }
+                        
+                        tokens.push(new Token(regex, "REGX", "REGX", this.line));
+                        return true;
+                    }
+                     
+                    regex += stream.next();
+                     
+                }
+                // error: unterminated regex
+            }
+            return false;
+        }
+    }
+}
\ No newline at end of file