},
Object,
{
-
+ collapseWhite : false, // if true, collapse each run of spaces to a single " " (and each run of newlines to "\n") rather than keeping the raw whitespace
/**
* tokenize a stream
- * @return {Array} of tokens
- */
- @type {JSDOC.Token[]}
-
-
+ * @return {Array} of tokens
+ * @example
+ * ts = new TextStream(File.read(str));
+ * tr = TokenReader({ keepComments : true, keepWhite : true });
+ * tr.tokenize(ts)
+ *
*/
+
tokenize : function(/**JSDOC.TextStream*/stream) {
}
var n = found.split('.');
var p = false;
+ var _this = this;
n.forEach(function(nm) {
if (p) {
- tokens.push(new Token('.', "PUNC", "DOT", this.line));
+ tokens.push(new Token('.', "PUNC", "DOT", _this.line));
}
p=true;
- tokens.push(new Token(nm, "NAME", "NAME", this.line));
+ tokens.push(new Token(nm, "NAME", "NAME", _this.line));
});
return true;
read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
var found = "";
- while (!stream.look().eof && Lang.isSpace(stream.look())) {
+ while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
found += stream.next();
}
if (found === "") {
return false;
}
- else {
- if (this.collapseWhite) found = " ";
- if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
- return true;
- }
+ //print("WHITE = " + JSON.stringify(found));
+ if (this.collapseWhite) found = " ";
+ if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
+ return true;
+
},
/**
*/
read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
var found = "";
-
+ var line = this.line;
while (!stream.look().eof && Lang.isNewline(stream.look())) {
this.line++;
found += stream.next();
if (found === "") {
return false;
}
- else {
- if (this.collapseWhite) found = "\n";
- if (this.keepWhite) tokens.push(new Token(found, "WHIT", "NEWLINE", this.line));
- return true;
+ //this.line++;
+ if (this.collapseWhite) {
+ found = "\n";
+ }
+ if (this.keepWhite) {
+ var last = tokens.pop();
+ if (last.name != "WHIT") {
+ tokens.push(last);
+ }
+
+ tokens.push(new Token(found, "WHIT", "NEWLINE", line));
}
+ return true;
},
/**
if (stream.look() == "/" && stream.look(1) == "*") {
var found = stream.next(2);
var c = '';
+ var line = this.line;
while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
c = stream.next();
if (c == "\n") this.line++;
// to start doclet we allow /** or /*** but not /**/ or /****
if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
- else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", this.line));
+ else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
return true;
}
return false;
||
(stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
) {
-
+ var line = this.line;
while (!stream.look().eof && !Lang.isNewline(stream.look())) {
found += stream.next();
}
-
+ if (!stream.look().eof) {
+ found += stream.next();
+ }
if (this.keepComments) {
- tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", this.line));
+ tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
}
this.line++;
return true;