X-Git-Url: http://git.roojs.org/?a=blobdiff_plain;f=JSDOC%2FTokenReader.vala;h=631fe3431179066c8332ab760dea178c34013c56;hb=981e36e52daf5f0dd3af63dfba6f411e45a26a3e;hp=68f900996522ce32cffff28891a6fe01ae78e529;hpb=26daa4064ee9509dab03c6421594217844127fa9;p=gnome.introspection-doc-generator

diff --git a/JSDOC/TokenReader.vala b/JSDOC/TokenReader.vala
index 68f9009..631fe34 100644
--- a/JSDOC/TokenReader.vala
+++ b/JSDOC/TokenReader.vala
@@ -46,6 +46,10 @@ namespace JSDOC {
         }
     }
 
+    errordomain TokenReader_Error {
+        ArgumentError
+    }
+
     public class TokenReader : Object
     {
 
@@ -90,12 +94,9 @@ namespace JSDOC {
             var tokens = new TokenArray();
 
 
             bool eof;
-            while (true) {
+            while (!stream.lookEOF()) {
+
-                stream.look(0, out eof)
-                if (eof) {
-                    break;
-                }
                 if (this.read_mlcomment(stream, tokens)) continue;
                 if (this.read_slcomment(stream, tokens)) continue;
                 if (this.read_dbquote(stream, tokens))   continue;
@@ -116,7 +117,7 @@ namespace JSDOC {
 
 
             return tokens;
-        },
+        }
 
         /**
          * findPuncToken - find the id of a token (previous to current)
@@ -127,7 +128,8 @@ namespace JSDOC {
          * @arg {Number} offset where to start reading from
          * @return {Number} position of token
          */
-        public int findPuncToken(TokenArray tokens, string data, int n) {
+        public int findPuncToken(TokenArray tokens, string data, int n)
+        {
             n = n || tokens.length -1;
             var stack = 0;
             while (n > -1) {
@@ -151,7 +153,7 @@ namespace JSDOC {
                 n--;
             }
             return -1;
-        },
+        }
         /**
          * lastSym - find the last token symbol
          * need to back check syntax..
@@ -160,45 +162,52 @@ namespace JSDOC {
          * @arg {Number} offset where to start..
          * @return {Token} the token
          */
-        lastSym : function(tokens, n) {
+        public Token lastSym(TokenArray tokens, int n)
+        {
             for (var i = n-1; i >= 0; i--) {
-                if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
+                if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
+                    return tokens.get(i);
+                }
             }
             return null;
-        },
+        }
 
         /**
            @returns {Boolean} Was the token found?
          */
-        read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
-            var found = "";
-            while (!stream.look().eof && Lang.isWordChar(stream.look())) {
+        public bool read_word (TokenStream stream, TokenArray tokens)
+        {
+            string found = "";
+            while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
                 found += stream.next();
             }
 
-            if (found === "") {
+            if (found == "") {
                 return false;
             }
 
-            var name;
-            if ((name = Lang.keyword(found))) {
-                if (found == 'return' && tokens.lastSym().data == ')') {
+            var name = Lang.keyword(found);
+            if (name != null) {
+
+                // look for "()return" ?? why ???
+                var ls = tokens.lastSym();
+                if (found == "return" && ls != null && ls.data == ")") {
                     //Seed.print('@' + tokens.length);
-                    var n = this.findPuncToken(tokens, ')');
+                    var n = this.findPuncToken(tokens, ")");
                     //Seed.print(')@' + n);
-                    n = this.findPuncToken(tokens, '(', n-1);
+                    n = this.findPuncToken(tokens, "(", n-1);
                     //Seed.print('(@' + n);
 
                     var lt = this.lastSym(tokens, n);
-                    print(JSON.stringify(lt));
-                    if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
+
+                    //print(JSON.stringify(lt));
+                    if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
                         if (!this.ignoreBadGrammer) {
-                            throw {
-                                name : "ArgumentError",
-                                message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
-                            }
+                            throw new TokenReader_Error.ArgumentError(
+                                this.filename + ":" + this.line + " Error - return found after )"
+                            );
                         }
                     }
 
@@ -209,32 +218,33 @@ namespace JSDOC {
                 tokens.push(new Token(found, "KEYW", name, this.line));
                 return true;
             }
+
             if (!this.sepIdents || found.indexOf('.') < 0 ) {
                 tokens.push(new Token(found, "NAME", "NAME", this.line));
                 return true;
             }
             var n = found.split('.');
             var p = false;
-            var _this = this;
-            n.forEach(function(nm) {
+            foreach (unowned string nm in n) {
                 if (p) {
-                    tokens.push(new Token('.', "PUNC", "DOT", _this.line));
+                    tokens.push(new Token('.', "PUNC", "DOT", this.line));
                 }
                 p=true;
-                tokens.push(new Token(nm, "NAME", "NAME", _this.line));
-            });
+                tokens.push(new Token(nm, "NAME", "NAME", this.line));
+            }
             return true;
-        },
+        }
 
         /**
            @returns {Boolean} Was the token found?
          */
-        read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
-            var found = "";
+        public bool read_punc (TokenStream stream, TokenArray tokens)
+        {
+            string found = "";
             var name;
-            while (!stream.look().eof && Lang.punc(found+stream.look())) {
+            while (!stream.look().eof && Lang.punc(found + stream.look()).length > 0) {
                 found += stream.next();
             }
 
@@ -243,17 +253,19 @@ namespace JSDOC {
                 return false;
             }
 
-            if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
+            var ls = tokens.lastSym();
+
+            if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
                 //print("Error - comma found before " + found);
                 //print(JSON.stringify(tokens.lastSym(), null,4));
                 if (this.ignoreBadGrammer) {
                     print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
                 } else {
-
-                    throw {
-                        name : "ArgumentError",
-                        message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
-
-                    }
+                    throw new TokenReader_Error.ArgumentError(
+                        this.filename + ":" + this.line + " comma found before " + found
+
+                    );
+
                 }
             }
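
Usage sketch (not part of the patch above): the central change here is replacing the old
JavaScript-style `throw { name: "ArgumentError", ... }` objects with the new Vala
errordomain TokenReader_Error. The fragment below is a minimal, hypothetical caller
showing how that error could be caught. It assumes the throwing methods are declared
with "throws TokenReader_Error", that the public entry point is tokenize(), and that a
TokenStream can be constructed from a source string; none of those signatures appear in
this diff, so treat them as assumptions.

    void main () {
        var reader = new JSDOC.TokenReader ();

        try {
            // ")" followed directly by "return" after a non-keyword should hit the
            // TokenReader_Error.ArgumentError path in read_word(), unless
            // ignoreBadGrammer is set on the reader.
            var stream = new JSDOC.TokenStream ("foo() return 1;"); // assumed constructor
            reader.tokenize (stream);                               // assumed entry point
        } catch (JSDOC.TokenReader_Error e) {
            stderr.printf ("bad grammar: %s\n", e.message);
        }
    }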