/**
 * TokenArray - a thin wrapper around a Gee.ArrayList of Tokens,
 * mirroring the JS array API used by the original JSDOC token reader.
 */
public class TokenArray : Object {

    public Gee.ArrayList<Token> tokens;

    // JS-style length alias for tokens.size
    public int length {
        get { return this.tokens.size; }
    }

    public TokenArray()
    {
        // must be created here, otherwise push()/get() would deref null
        this.tokens = new Gee.ArrayList<Token>();
    }

    /**
     * lastSym - scan backwards for the last non-whitespace,
     * non-comment token.
     * @return the token, or null if only whitespace/comments precede
     */
    public Token? lastSym () {
        for (var i = this.tokens.size - 1; i >= 0; i--) {
            if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM"))) {
                return this.tokens.get(i);
            }
        }
        return null;
    }

    /**
     * push - append a token (JS Array.push equivalent).
     */
    public void push (Token t) {
        this.tokens.add(t);
    }

    /**
     * get - fetch token at index i.
     */
    public Token get(int i) {
        return this.tokens.get(i);
    }
}
/**
 * Errors raised by the token reader when bad grammar is found
 * in the scanned source (e.g. "return" after ")" or a comma
 * before a closing bracket).
 */
errordomain TokenReader_Error {
    ArgumentError
}

public class TokenReader : Object
{
this.line =1;
var tokens = new TokenArray();
-
- while (!stream.look().eof) {
+ bool eof;
+ while (!stream.lookEOF()) {
+
+
if (this.read_mlcomment(stream, tokens)) continue;
if (this.read_slcomment(stream, tokens)) continue;
if (this.read_dbquote(stream, tokens)) continue;
if (this.read_word(stream, tokens)) continue;
// if execution reaches here then an error has happened
- tokens.push(new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line));
+ tokens.push(
+ new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
+ );
}
return tokens;
- },
+ }
/**
 * findPuncToken - find the id of a punctuation token, scanning
 * backwards from position n and balancing nested ')'/'}' pairs.
 * @param tokens the token array to search
 * @param data the punctuation string to look for (e.g. "(" or ")")
 * @param n offset to start reading from; -1 (default) means start
 *          at the last token
 * @return position of the token, or -1 if not found
 */
public int findPuncToken(TokenArray tokens, string data, int n = -1)
{
    // JS original used `n = n || tokens.length - 1`; in Vala we
    // treat any negative offset as "start from the end".
    if (n < 0) {
        n = tokens.length - 1;
    }
    var stack = 0;
    while (n > -1) {
        // only match when not inside a nested bracket pair
        if (stack == 0 && tokens.get(n).data == data) {
            return n;
        }
        if (tokens.get(n).data == ")" || tokens.get(n).data == "}") {
            stack++;
            n--;
            continue;
        }
        if (stack > 0 && (tokens.get(n).data == "{" || tokens.get(n).data == "(")) {
            stack--;
            n--;
            continue;
        }
        n--;
    }
    return -1;
}
/**
 * lastSym - find the last token symbol before position n,
 * skipping whitespace and comment tokens (used to back-check
 * syntax, e.g. what precedes a "(" ).
 * @param tokens the token array to search
 * @param n offset to start looking backwards from (exclusive)
 * @return the token, or null if nothing but whitespace/comments precede
 */
public Token? lastSym(TokenArray tokens, int n)
{
    for (var i = n - 1; i >= 0; i--) {
        if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
            return tokens.get(i);
        }
    }
    // declared nullable: callers must handle the not-found case
    return null;
}
/**
 * read_word - read a keyword or identifier token from the stream.
 * Identifiers containing '.' are optionally split into
 * NAME / DOT / NAME ... tokens when sepIdents is set.
 * @param stream the character stream to read from
 * @param tokens the token array to append to
 * @return true if a word token was found
 * @throws TokenReader_Error.ArgumentError on "return" found after ")"
 *         (unless ignoreBadGrammer is set)
 */
public bool read_word (TokenStream stream, TokenArray tokens) throws TokenReader_Error
{
    string found = "";
    while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
        found += stream.next();
    }
    if (found == "") {
        return false;
    }

    var name = Lang.keyword(found);
    if (name != null) {

        // grammar check: "return" straight after ")" is only legal
        // when the "(...)" belongs to an if/while - back-track to
        // the matching "(" and inspect the keyword before it.
        var ls = tokens.lastSym();
        if (found == "return" && ls != null && ls.data == ")") {
            var n = this.findPuncToken(tokens, ")");
            n = this.findPuncToken(tokens, "(", n - 1);
            var lt = this.lastSym(tokens, n);

            // NOTE: the JS original tested `indexOf(...) < -1`, which is
            // always false; the intent was "name is not IF or WHILE".
            if (lt == null || lt.type != "KEYW" || (lt.name != "IF" && lt.name != "WHILE")) {
                if (!this.ignoreBadGrammer) {
                    throw new TokenReader_Error.ArgumentError(
                        this.filename + ":" + this.line.to_string() + " Error - return found after )"
                    );
                }
            }
        }
        tokens.push(new Token(found, "KEYW", name, this.line));
        return true;
    }

    // plain identifier - keep as a single NAME unless we split on '.'
    if (!this.sepIdents || found.index_of(".") < 0) {
        tokens.push(new Token(found, "NAME", "NAME", this.line));
        return true;
    }

    // split "a.b.c" into NAME DOT NAME DOT NAME
    var parts = found.split(".");
    var first = true;
    foreach (unowned string nm in parts) {
        if (!first) {
            tokens.push(new Token(".", "PUNC", "DOT", this.line));
        }
        first = false;
        tokens.push(new Token(nm, "NAME", "NAME", this.line));
    }
    return true;
}
/**
 * read_punc - read the longest punctuation token available at the
 * current stream position.
 * @param stream the character stream to read from
 * @param tokens the token array to append to
 * @return true if a punctuation token was found
 * @throws TokenReader_Error.ArgumentError on a comma directly before
 *         '}' or ']' (unless ignoreBadGrammer is set)
 */
public bool read_punc (TokenStream stream, TokenArray tokens) throws TokenReader_Error
{
    string found = "";
    while (!stream.lookEOF() && Lang.punc(found + stream.look()).length > 0) {
        found += stream.next();
    }
    if (found == "") {
        return false;
    }

    // grammar check: a trailing comma before a closing bracket
    // (e.g. "[1,2,]" or "{a:1,}") is flagged as an error.
    var ls = tokens.lastSym();
    if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
        if (this.ignoreBadGrammer) {
            print("\n" + this.filename + ":" + this.line.to_string() + " Error - comma found before " + found);
        } else {
            throw new TokenReader_Error.ArgumentError(
                this.filename + ":" + this.line.to_string() + " comma found before " + found
            );
        }
    }

    tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
    return true;
}
/**
 * read_space - read a run of non-newline whitespace from the stream.
 * Whitespace may be collapsed to a single space (collapseWhite) and
 * is only emitted as a token when keepWhite is set.
 * @param stream the character stream to read from
 * @param tokens the token array to append to
 * @return true if whitespace was found (even when not emitted)
 */
public bool read_space (TokenStream stream, TokenArray tokens)
{
    string found = "";
    while (!stream.lookEOF() && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
        found += stream.next();
    }
    if (found == "") {
        return false;
    }

    // if we found a new line, then we could check if the previous
    // character was a ';' - if so we can drop it. otherwise generally
    // keep it, which should reduce issues with stripping new lines.

    if (this.collapseWhite) {
        found = " "; // this might work better if it was a '\n' ???
    }
    if (this.keepWhite) {
        tokens.push(new Token(found, "WHIT", "SPACE", this.line));
    }
    return true;
}
/**
@returns {Boolean} Was the token found?
*/
- read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
+ public bool read_newline (TokenStream stream, TokenArray tokens)
var found = "";
var line = this.line;
while (!stream.look().eof && Lang.isNewline(stream.look())) {