tokenize : function(/**JSDOC.TextStream*/stream) {
this.line =1;
var tokens = [];
- /**@ignore*/ tokens.last = function() { return tokens[tokens.length-1]; }
- /**@ignore*/ tokens.lastSym = function() {
+ /**@ignore*/
+ tokens.last = function() { return tokens[tokens.length-1]; }
+ /**@ignore*/
+ tokens.lastSym = function() {
for (var i = tokens.length-1; i >= 0; i--) {
if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
}
found += stream.next();
}
+
if (found === "") {
return false;
}
- else {
- tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
- return true;
+
+ if ((found == '}' || found == ']') && tokens.lastSym() && tokens.lastSym().data == ',') {
+ //print("Error - comma found before " + found);
+ //print(JSON.stringify(tokens.lastSym(), null,4));
+ throw {
+ name : "ArgumentError",
+ message: "Error - comma found before " + found + " on line " + this.line + "\n"
+ };
}
+
+ tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
+ return true;
+
},
/**