//<script type="text/javascript">
-
-
+// test code
+
//const Token = imports.Token.Token;
//const Lang = imports.Lang.Lang;
/**
@class Search a {@link JSDOC.TextStream} for language tokens.
*/
-
+
namespace JSDOC {
public class TokenArray: Object {
public new Token get(int i) {
return this.tokens.get(i);
}
+ public void dump()
+ {
+ foreach(var token in this.tokens) {
+ print(token.asString() +"\n");
+ }
+ }
+
}
- errordomain TokenReader_Error {
+ public errordomain TokenReader_Error {
ArgumentError
}
this.line =1;
var tokens = new TokenArray();
- bool eof;
+
while (!stream.lookEOF()) {
-
+
if (this.read_mlcomment(stream, tokens)) continue;
if (this.read_slcomment(stream, tokens)) continue;
if (this.read_dbquote(stream, tokens)) continue;
*/
public int findPuncToken(TokenArray tokens, string data, int n)
{
- n = n > 0 ? n : tokens.length() -1;
+ n = n > 0 ? n : tokens.length -1;
var stack = 0;
while (n > -1) {
- if (!stack && tokens.get(n).data == data) {
+ if (stack < 1 && tokens.get(n).data == data) {
return n;
}
- if (tokens.get(n).data == ')' || tokens.get(n).data == '}') {
+ if (tokens.get(n).data == ")" || tokens.get(n).data == "}") {
stack++;
n--;
continue;
}
- if (stack && (tokens.get(n).data == '{' || tokens.get(n).data == '(')) {
+ if (stack > 0 && (tokens.get(n).data == "{" || tokens.get(n).data == "(")) {
stack--;
n--;
continue;
* @arg {Number} offset where to start..
* @return {Token} the token
*/
- public Token lastSym(TokenArray tokens, int n)
+ public Token? lastSym(TokenArray tokens, int n)
{
for (var i = n-1; i >= 0; i--) {
if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
public bool read_word (TextStream stream, TokenArray tokens)
{
string found = "";
- while (!stream.lookEOF() && Lang.isWordChar(stream.look())) {
+ while (!stream.lookEOF() && Lang.isWordChar(stream.look().to_string())) {
found += stream.next();
}
var ls = tokens.lastSym();
if (found == "return" && ls != null && ls.data == ")") {
//Seed.print('@' + tokens.length);
- var n = this.findPuncToken(tokens, ")");
+ var n = this.findPuncToken(tokens, ")", 0);
//Seed.print(')@' + n);
n = this.findPuncToken(tokens, "(", n-1);
//Seed.print('(@' + n);
- var lt = this.lastSym(tokens, n);
+ //var lt = this.lastSym(tokens, n);
/*
//print(JSON.stringify(lt));
if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
return true;
}
- if (!this.sepIdents || found.indexOf('.') < 0 ) {
+ if (!this.sepIdents || found.index_of(".") < 0 ) {
tokens.push(new Token(found, "NAME", "NAME", this.line));
return true;
}
- var n = found.split('.');
+ var n = found.split(".");
var p = false;
foreach (unowned string nm in n) {
if (p) {
- tokens.push(new Token('.', "PUNC", "DOT", this.line));
+ tokens.push(new Token(".", "PUNC", "DOT", this.line));
}
p=true;
tokens.push(new Token(nm, "NAME", "NAME", this.line));
/**
@returns {Boolean} Was the token found?
*/
- public bool read_punc (TextStream stream, TokenArray tokens)
+ public bool read_punc (TextStream stream, TokenArray tokens) throws TokenReader_Error
{
string found = "";
- var name;
- while (!stream.lookEOF() && Lang.punc(found + stream.look()).length > 0) {
+
+ while (!stream.lookEOF()) {
+ var ns = stream.look().to_string();
+
+ if (null == Lang.punc(found + ns )) {
+ break;
+ }
found += stream.next();
}
//print("Error - comma found before " + found);
//print(JSON.stringify(tokens.lastSym(), null,4));
if (this.ignoreBadGrammer) {
- print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
+ print("\n" + this.filename + ":" + this.line.to_string() + " Error - comma found before " + found);
} else {
throw new TokenReader_Error.ArgumentError(
- this.filename + ":" + this.line + " comma found before " + found
+ this.filename + ":" + this.line.to_string() + " comma found before " + found
);
// to start doclet we allow /** or /*** but not /**/ or /****
//if (found.length /^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) {
- if (this.keepDocs && found.length > 4 && found.index_of("/**") == 0 && found[3] != "/") {
+ if (this.keepDocs && found.length > 4 && found.index_of("/**") == 0 && found[3] != '/') {
tokens.push(new Token(found, "COMM", "JSDOC", this.line));
} else if (this.keepComments) {
tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
{
var found = "";
if (
- (stream.look() == '/' && stream.look(1) == '/' && (found=stream.next(2)))
+ (stream.look() == '/' && stream.look(1) == '/' && (""!=(found=stream.next(2))))
||
- (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
+ (stream.look() == '<' && stream.look(1) == '!' && stream.look(2) == '-' && stream.look(3) == '-' && (""!=(found=stream.next(4))))
) {
var line = this.line;
- while (!stream.lookEOF() && !Lang.isNewline(stream.look())) {
+ while (!stream.lookEOF()) {
+ //print(stream.look().to_string());
+ if ( Lang.isNewline(stream.look().to_string())) {
+ break;
+ }
found += stream.next();
}
- //if (!stream.lookEOF()) { // what? << eat the EOL?
+ if (!stream.lookEOF()) { // looking for end of line... if found (not at EOF), consume the newline character..
found += stream.next();
- //}
+ }
if (this.keepComments) {
tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
}
*/
public bool read_dbquote (TextStream stream, TokenArray tokens)
{
- if (stream.look() != "\"") {
+ if (stream.look() != '"') {
return false;
}
// find terminator
var str = stream.next();
while (!stream.lookEOF()) {
- if (stream.look() == "\\") {
- if (Lang.isNewline(stream.look(1))) {
+ if (stream.look() == '\\') {
+ if (Lang.isNewline(stream.look(1).to_string())) {
do {
stream.next();
- } while (!stream.lookEOF() && Lang.isNewline(stream.look()));
+ } while (!stream.lookEOF() && Lang.isNewline(stream.look().to_string()));
str += "\\\n";
}
else {
}
continue;
}
- if (stream.look() == "\"") {
+ if (stream.look() == '"') {
str += stream.next();
tokens.push(new Token(str, "STRN", "DOUBLE_QUOTE", this.line));
return true;
*/
public bool read_snquote (TextStream stream, TokenArray tokens)
{
- if (stream.look() != "'") {
+ if (stream.look() != '\'') {
return false;
}
// find terminator
var str = stream.next();
- while (!stream.look().eof) {
- if (stream.look() == "\\") { // escape sequence
+ while (!stream.lookEOF()) {
+ if (stream.look() == '\\') { // escape sequence
str += stream.next(2);
continue;
}
- if (stream.look() == "'") {
+ if (stream.look() == '\'') {
str += stream.next();
tokens.push(new Token(str, "STRN", "SINGLE_QUOTE", this.line));
return true;
*/
public bool read_numb (TextStream stream, TokenArray tokens)
{
- if (stream.look() == "0" && stream.look(1) == "x") {
+ if (stream.look() == '0' && stream.look(1) == 'x') {
return this.read_hex(stream, tokens);
}
var found = "";
- while (!stream.lookEOF() && Lang.isNumber(found+stream.look())){
+ while (!stream.lookEOF() && Lang.isNumber(found+stream.look().to_string())){
found += stream.next();
}
/**
@returns {Boolean} Was the token found?
*/
- public bool read_hex (TokenStream stream, TokenArray tokens)
+ public bool read_hex (TextStream stream, TokenArray tokens)
{
var found = stream.next(2);
while (!stream.lookEOF()) {
- if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
+ if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look().to_string())) { // done
tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
return true;
}
*/
public bool read_regx (TextStream stream, TokenArray tokens)
{
- Token last;
- if (stream.look() != "/") {
+
+ if (stream.look() != '/') {
return false;
}
- var last = tokens.lastSym();
+ var last = tokens.lastSym();
if (
(last == null)
||
var regex = stream.next();
while (!stream.lookEOF()) {
- if (stream.look() == "\\") { // escape sequence
+ if (stream.look() == '\\') { // escape sequence
regex += stream.next(2);
continue;
}
- if (stream.look() == "/") {
+ if (stream.look() == '/') {
regex += stream.next();
- while (GLib.Regex.match_simple("[gmi]", stream.look())) {
+ while (GLib.Regex.match_simple("[gmi]", stream.look().to_string())) {
regex += stream.next();
}