//<script type="text/javascript">
-const XObject = imports.XObject.XObject;
-const console = imports.console.console;
-const Token = imports.Token.Token;
-const Lang = imports.Lang.Lang;
+//const Token = imports.Token.Token;
+//const Lang = imports.Lang.Lang;
/**
@class Search a {@link JSDOC.TextStream} for language tokens.
*/
-const TokenReader = XObject.define(
- function(o) {
+
+namespace JSDOC {
+
+    public class TokenArray: Object {
+
+        // backing store for the token list
+        public Gee.ArrayList<Token> tokens;
+        // JS-Array-style length, backed by the Gee list's size
+        public int length {
+            get { return this.tokens.size; }
+        }
-        XObject.extend(this, o || {});
+        public TokenArray()
+        {
+            // FIX: was "this.items = ..." but the field is named "tokens"
+            this.tokens = new Gee.ArrayList<Token>();
+        }
-    },
-    Object,
+        // last token added, or null when the list is empty
+        public Token? last() {
+            // FIX: compare the list size, not the list itself, and use
+            // Gee's .size (ArrayList has no .length property)
+            if (this.tokens.size > 0) {
+                return this.tokens.get(this.tokens.size - 1);
+            }
+            return null;
+        }
+        // last token that is not whitespace or a comment, or null if none
+        public Token? lastSym () {
+            for (var i = this.tokens.size - 1; i >= 0; i--) {
+                if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM"))) {
+                    return this.tokens.get(i);
+                }
+            }
+            return null;
+        }
+        public void push (Token t) {
+            this.tokens.add(t);
+        }
+        public Token get(int i) {
+            return this.tokens.get(i);
+        }
+    }
+
+
+    public class TokenReader : Object
     {
+
+
+
+        /*
+         *
+         * I wonder if this will accept the prop: value, prop2 :value constructor if we do not define one...
+         */
+
        /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
-    collapseWhite : false, // only reduces white space...
+        public bool collapseWhite = false; // only reduces white space...
        /** @cfg {Boolean} keepDocs keep JSDOC comments **/
-    keepDocs : true,
+        public bool keepDocs = true;
        /** @cfg {Boolean} keepWhite keep White space **/
-    keepWhite : false,
+        public bool keepWhite = false;
        /** @cfg {Boolean} keepComments keep all comments **/
-    keepComments : false,
+        public bool keepComments = false;
        /** @cfg {Boolean} sepIdents seperate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
-    sepIdents : false,
+        public bool sepIdents = false;
        /** @cfg {String} filename name of file being parsed. **/
-    filename : '',
+        public string filename = "";
        /** @config {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
-    ignoreBadGrammer : false,
+        public bool ignoreBadGrammer = false;
+
+
+        // current line number while tokenizing (reset by tokenize())
+        int line = 0;
+
+
/**
* tokenize a stream
* @return {Array} of tokens
* tr.tokenize(ts)
*
*/
- tokenize : function(/**JSDOC.TextStream*/stream) {
+ public TokenArray tokenize(TextStream stream)
+ {
this.line =1;
- var tokens = [];
- /**@ignore*/
- tokens.last = function() { return tokens[tokens.length-1]; }
- /**@ignore*/
- tokens.lastSym = function() {
- for (var i = tokens.length-1; i >= 0; i--) {
- if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
- }
- return true;
- }
-
- while (!stream.look().eof) {
+ var tokens = new TokenArray();
+
+ bool eof;
+ while (!stream.lookEOF()) {
+
+
if (this.read_mlcomment(stream, tokens)) continue;
if (this.read_slcomment(stream, tokens)) continue;
if (this.read_dbquote(stream, tokens)) continue;
if (this.read_word(stream, tokens)) continue;
// if execution reaches here then an error has happened
- tokens.push(new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line));
+ tokens.push(
+ new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
+ );
}
* @arg {Number} offset where to start reading from
* @return {Number} position of token
*/
- findPuncToken : function(tokens, data, n) {
+ public int findPuncToken(TokenArray tokens, string data, int n) {
n = n || tokens.length -1;
var stack = 0;
while (n > -1) {
- if (!stack && tokens[n].data == data) {
+ if (!stack && tokens.get(n).data == data) {
return n;
}
- if (tokens[n].data == ')' || tokens[n].data == '}') {
+ if (tokens.get(n).data == ')' || tokens.get(n).data == '}') {
stack++;
n--;
continue;
}
- if (stack && (tokens[n].data == '{' || tokens[n].data == '(')) {
+ if (stack && (tokens.get(n).data == '{' || tokens.get(n).data == '(')) {
stack--;
n--;
continue;
* @arg {Number} offset where to start..
* @return {Token} the token
*/
-    lastSym : function(tokens, n) {
+    // last token before position n that is not whitespace or a comment,
+    // or null if none exists.
+    // FIX: return type must be nullable (Token?) since null is returned,
+    // matching TokenArray.lastSym above.
+    public Token? lastSym(TokenArray tokens, int n) {
        for (var i = n-1; i >= 0; i--) {
-            if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
+            if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
+                return tokens.get(i);
+            }
        }
        return null;
-    },
+    }
/**
@returns {Boolean} Was the token found?
*/
- read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
+ public bool read_word (TokenStream stream, TokenArray tokens) {
var found = "";
while (!stream.look().eof && Lang.isWordChar(stream.look())) {
found += stream.next();