}
}
+ errordomain TokenReader_Error {
+ ArgumentError
+ }
+
public class TokenReader : Object
{
}
var name = Lang.keyword(found);
- if (name) {
+ if (name != null) {
// look for "()return" ?? why ???
-
- if (found == "return" && tokens.lastSym().data == ")") {
+ var ls = tokens.lastSym();
+ if (found == "return" && ls != null && ls.data == ")") {
//Seed.print('@' + tokens.length);
var n = this.findPuncToken(tokens, ")");
//Seed.print(')@' + n);
//print(JSON.stringify(lt));
if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
if (!this.ignoreBadGrammer) {
- throw {
- name : "ArgumentError",
- message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
- }
+ throw new TokenReader_Error.ArgumentError(
+ this.filename + ":" + this.line + " Error - return found after )"
+ );
}
}
tokens.push(new Token(found, "KEYW", name, this.line));
return true;
}
+
if (!this.sepIdents || found.indexOf('.') < 0 ) {
tokens.push(new Token(found, "NAME", "NAME", this.line));
return true;
}
var n = found.split('.');
var p = false;
- var _this = this;
- n.forEach(function(nm) {
+ foreach (unowned string nm in n) {
if (p) {
- tokens.push(new Token('.', "PUNC", "DOT", _this.line));
+ tokens.push(new Token('.', "PUNC", "DOT", this.line));
}
p=true;
- tokens.push(new Token(nm, "NAME", "NAME", _this.line));
- });
+ tokens.push(new Token(nm, "NAME", "NAME", this.line));
+ }
return true;
/**
@returns {Boolean} Was the token found?
*/
- read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
- var found = "";
+ public bool read_punc (TokenStream stream, TokenArray tokens)
+ {
+ string found = "";
- var name;
- while (!stream.look().eof && Lang.punc(found+stream.look())) {
+ while (!stream.lookEOF() && Lang.punc(found + stream.look()).length > 0) {
found += stream.next();
}
return false;
}
- if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
+ var ls = tokens.lastSym();
+
+ if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
//print("Error - comma found before " + found);
//print(JSON.stringify(tokens.lastSym(), null,4));
if (this.ignoreBadGrammer) {
print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
} else {
-
- throw {
- name : "ArgumentError",
- message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
- }
+ throw new TokenReader_Error.ArgumentError(
+ this.filename + ":" + this.line + " Error - comma found before " + found
+
+ );
+
}
}
tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
return true;
- },
+ }
/**
@returns {Boolean} Was the token found?
*/
- read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
+ public bool read_space (TokenStream stream, TokenArray tokens)
+ {
var found = "";
- while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
+ while (!stream.lookEOF() && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
found += stream.next();
}
- if (found === "") {
+ if (found == "") {
return false;
}
- //print("WHITE = " + JSON.stringify(found));
- if (this.collapseWhite) found = " ";
- if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+ //print("WHITE = " + JSON.stringify(found));
+
+ // if we found a new line, then we could check if previous character was a ';' - if so we can drop it.
+ // otherwise generally keep it.. in which case it should reduce our issue with stripping new lines..
+
+ if (this.collapseWhite) {
+ found = " "; // this might work better if it was a '\n' ???
+ }
+ if (this.keepWhite) {
+ tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+ }
return true;
- },
+ }
/**
@returns {Boolean} Was the token found?
*/
- read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
+ public bool read_newline (TokenStream stream, TokenArray tokens)
+ {
var found = "";
var line = this.line;
while (!stream.look().eof && Lang.isNewline(stream.look())) {