1 //<script type="text/javascript">
4 const XObject = imports.XObject.XObject;
5 const console = imports.console.console;
8 const Token = imports.Token.Token;
9 const Lang = imports.Lang.Lang;
@class Searches a {@link JSDOC.TextStream} for language tokens.
const TokenReader = XObject.define(
    // ctor: copy caller-supplied config options (collapseWhite, keepDocs,
    // keepWhite, keepComments, sepIdents, filename, ignoreBadGrammer, ...)
    // onto this instance; o may be omitted.
    XObject.extend(this, o || {});
/** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
collapseWhite : false, // only reduces white space...
/** @cfg {Boolean} keepDocs keep JSDOC comments **/
/** @cfg {Boolean} keepWhite keep White space **/
/** @cfg {Boolean} keepComments keep all comments **/
/** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
/** @cfg {String} filename name of file being parsed. **/
/** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
ignoreBadGrammer : false,
38 * @return {Array} of tokens
40 * ts = new TextStream(File.read(str));
41 * tr = TokenReader({ keepComments : true, keepWhite : true });
45 tokenize : function(/**JSDOC.TextStream*/stream) {
49 tokens.last = function() { return tokens[tokens.length-1]; }
51 tokens.lastSym = function() {
52 for (var i = tokens.length-1; i >= 0; i--) {
53 if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
58 while (!stream.look().eof) {
59 if (this.read_mlcomment(stream, tokens)) continue;
60 if (this.read_slcomment(stream, tokens)) continue;
61 if (this.read_dbquote(stream, tokens)) continue;
62 if (this.read_snquote(stream, tokens)) continue;
63 if (this.read_regx(stream, tokens)) continue;
64 if (this.read_numb(stream, tokens)) continue;
65 if (this.read_punc(stream, tokens)) continue;
66 if(typeof(tokens) == 'undefined') {console.log('empty????');}
67 if (this.read_newline(stream, tokens)) continue;
68 if (this.read_space(stream, tokens)) continue;
69 if (this.read_word(stream, tokens)) continue;
71 // if execution reaches here then an error has happened
72 tokens.push(new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line));
81 * findPuncToken - find the id of a token (previous to current)
82 * need to back check syntax..
84 * @arg {Array} tokens the array of tokens.
85 * @arg {String} token data (eg. '(')
86 * @arg {Number} offset where to start reading from
87 * @return {Number} position of token
89 findPuncToken : function(tokens, data, n) {
90 n = n || tokens.length -1;
94 if (!stack && tokens[n].data == data) {
98 if (tokens[n].data == ')' || tokens[n].data == '}') {
103 if (stack && (tokens[n].data == '{' || tokens[n].data == '(')) {
 * lastSym - find the last token symbol
 * need to back check syntax..
 *
 * Walks backwards from position n-1 and returns the first token that is
 * not whitespace ("WHIT") or a comment ("COMM").
 *
 * @arg {Array} tokens the array of tokens.
 * @arg {Number} offset where to start.. (the scan begins at offset-1)
 * @return {Token} the token
 */
lastSym : function(tokens, n) {
    // NOTE(review): the fall-through return (when only whitespace/comments
    // precede n) is not visible in this view — callers should guard.
    for (var i = n-1; i >= 0; i--) {
        if (!(tokens[i].is("WHIT") || tokens[i].is("COMM"))) return tokens[i];
132 @returns {Boolean} Was the token found?
134 read_word : function(/**JSDOC.TokenStream*/stream, tokens) {
136 while (!stream.look().eof && Lang.isWordChar(stream.look())) {
137 found += stream.next();
145 if ((name = Lang.keyword(found))) {
146 if (found == 'return' && tokens.lastSym().data == ')') {
147 //Seed.print('@' + tokens.length);
148 var n = this.findPuncToken(tokens, ')');
149 //Seed.print(')@' + n);
150 n = this.findPuncToken(tokens, '(', n-1);
151 //Seed.print('(@' + n);
153 var lt = this.lastSym(tokens, n);
154 print(JSON.stringify(lt));
155 if (lt.type != 'KEYW' || ['IF', 'WHILE'].indexOf(lt.name) < -1) {
156 if (!this.ignoreBadGrammer) {
158 name : "ArgumentError",
159 message: "\n" + this.filename + ':' + this.line + " Error - return found after )"
168 tokens.push(new Token(found, "KEYW", name, this.line));
171 if (!this.sepIdents || found.indexOf('.') < 0 ) {
172 tokens.push(new Token(found, "NAME", "NAME", this.line));
175 var n = found.split('.');
178 n.forEach(function(nm) {
180 tokens.push(new Token('.', "PUNC", "DOT", _this.line));
183 tokens.push(new Token(nm, "NAME", "NAME", _this.line));
191 @returns {Boolean} Was the token found?
193 read_punc : function(/**JSDOC.TokenStream*/stream, tokens) {
196 while (!stream.look().eof && Lang.punc(found+stream.look())) {
197 found += stream.next();
205 if ((found == '}' || found == ']') && tokens.lastSym().data == ',') {
206 //print("Error - comma found before " + found);
207 //print(JSON.stringify(tokens.lastSym(), null,4));
208 if (this.ignoreBadGrammer) {
209 print("\n" + this.filename + ':' + this.line + " Error - comma found before " + found);
213 name : "ArgumentError",
214 message: "\n" + this.filename + ':' + this.line + " Error - comma found before " + found
219 tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
225 @returns {Boolean} Was the token found?
227 read_space : function(/**JSDOC.TokenStream*/stream, tokens) {
230 while (!stream.look().eof && Lang.isSpace(stream.look()) && !Lang.isNewline(stream.look())) {
231 found += stream.next();
237 //print("WHITE = " + JSON.stringify(found));
238 if (this.collapseWhite) found = " ";
239 if (this.keepWhite) tokens.push(new Token(found, "WHIT", "SPACE", this.line));
245 @returns {Boolean} Was the token found?
247 read_newline : function(/**JSDOC.TokenStream*/stream, tokens) {
249 var line = this.line;
250 while (!stream.look().eof && Lang.isNewline(stream.look())) {
252 found += stream.next();
259 if (this.collapseWhite) {
262 if (this.keepWhite) {
263 console.log('empty????');
264 var last = tokens.pop();
265 if (last && last.name != "WHIT") {
269 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
 * read_mlcomment
 * Read a multi-line comment.  Doclets (/** openers) become COMM/JSDOC
 * tokens when keepDocs is set; other comments become
 * COMM/MULTI_LINE_COMM tokens when keepComments is set.
 * @returns {Boolean} Was the token found?
 */
read_mlcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
    if (stream.look() == "/" && stream.look(1) == "*") {
        var found = stream.next(2);
        var line = this.line; // line the comment starts on
        // consume until the two characters just read were "*/"
        while (!stream.look().eof && !(stream.look(-1) == "/" && stream.look(-2) == "*")) {
            if (c == "\n") this.line++; // track newlines inside the comment
        // to start doclet we allow /** or /*** but not /**/ or /****
        // NOTE(review): JSDOC tokens use this.line (the comment END) while
        // MULTI_LINE_COMM uses line (the START) — confirm this is intentional.
        if (/^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) tokens.push(new Token(found, "COMM", "JSDOC", this.line));
        else if (this.keepComments) tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
 * read_slcomment
 * Read a single-line comment: either a "//" opener or an HTML-style
 * "<!--" opener, consuming up to and including the terminating newline.
 * @returns {Boolean} Was the token found?
 */
read_slcomment : function(/**JSDOC.TokenStream*/stream, tokens) {
        // "//" comment opener
        (stream.look() == "/" && stream.look(1) == "/" && (found=stream.next(2)))
        // "<!--" opener (seen in inline <script> blocks)
        (stream.look() == "<" && stream.look(1) == "!" && stream.look(2) == "-" && stream.look(3) == "-" && (found=stream.next(4)))
        var line = this.line;
        // consume the rest of the line
        while (!stream.look().eof && !Lang.isNewline(stream.look())) {
            found += stream.next();
        // include the newline itself in the comment token
        if (!stream.look().eof) {
            found += stream.next();
        if (this.keepComments) {
            tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
 * read_dbquote
 * Read a double-quoted string literal and push a STRN/DOUBLE_QUOTE
 * token.  Handles backslash escape sequences; a backslash followed by a
 * newline is treated as a line continuation inside the literal.
 * @returns {Boolean} Was the token found?
 */
read_dbquote : function(/**JSDOC.TokenStream*/stream, tokens) {
    if (stream.look() == "\"") {
        var string = stream.next();
        while (!stream.look().eof) {
            if (stream.look() == "\\") {
                // backslash-newline: string continues on the next line
                if (Lang.isNewline(stream.look(1))) {
                } while (!stream.look().eof && Lang.isNewline(stream.look()));
                // ordinary escape: keep the backslash plus the escaped char
                string += stream.next(2);
            // closing double quote: token complete
            else if (stream.look() == "\"") {
                string += stream.next();
                tokens.push(new Token(string, "STRN", "DOUBLE_QUOTE", this.line));
            // any other character belongs to the string body
            string += stream.next();
    return false; // error! unterminated string
356 @returns {Boolean} Was the token found?
358 read_snquote : function(/**JSDOC.TokenStream*/stream, tokens) {
359 if (stream.look() == "'") {
361 var string = stream.next();
363 while (!stream.look().eof) {
364 if (stream.look() == "\\") { // escape sequence
365 string += stream.next(2);
367 else if (stream.look() == "'") {
368 string += stream.next();
369 tokens.push(new Token(string, "STRN", "SINGLE_QUOTE", this.line));
373 string += stream.next();
377 return false; // error! unterminated string
381 @returns {Boolean} Was the token found?
383 read_numb : function(/**JSDOC.TokenStream*/stream, tokens) {
384 if (stream.look() === "0" && stream.look(1) == "x") {
385 return this.read_hex(stream, tokens);
390 while (!stream.look().eof && Lang.isNumber(found+stream.look())){
391 found += stream.next();
398 if (/^0[0-7]/.test(found)) tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
399 else tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
404 requires("../lib/JSDOC/TextStream.js");
405 requires("../lib/JSDOC/Token.js");
406 requires("../lib/JSDOC/Lang.js");
408 plan(3, "testing read_numb");
411 var src = "function foo(num){while (num+8.0 >= 0x20 && num < 0777){}}";
412 var tr = new TokenReader();
413 var tokens = tr.tokenize(new TextStream(src));
415 var hexToken, octToken, decToken;
416 for (var i = 0; i < tokens.length; i++) {
417 if (tokens[i].name == "HEX_DEC") hexToken = tokens[i];
418 if (tokens[i].name == "OCTAL") octToken = tokens[i];
419 if (tokens[i].name == "DECIMAL") decToken = tokens[i];
423 is(decToken.data, "8.0", "decimal number is found in source.");
424 is(hexToken.data, "0x20", "hexdec number is found in source (issue #99).");
425 is(octToken.data, "0777", "octal number is found in source.");
429 @returns {Boolean} Was the token found?
431 read_hex : function(/**JSDOC.TokenStream*/stream, tokens) {
432 var found = stream.next(2);
434 while (!stream.look().eof) {
435 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look())) { // done
436 tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
440 found += stream.next();
 * read_regx
 * Read a regular-expression literal.  A leading "/" is only a regex
 * when the previous significant token cannot end an expression
 * (otherwise "/" would be the division operator) — hence the lastSym()
 * guards below.
 * @returns {Boolean} Was the token found?
 */
read_regx : function(/**JSDOC.TokenStream*/stream, tokens) {
        // a regex may start here only if no expression precedes it:
        !(last = tokens.lastSym()) // there is no last, the regex is the first symbol
        // ...or the last symbol cannot terminate an expression:
        && !last.is("RIGHT_PAREN")
        && !last.is("RIGHT_BRACKET")
    // consume the body of the literal
    var regex = stream.next();
    while (!stream.look().eof) {
        if (stream.look() == "\\") { // escape sequence
            regex += stream.next(2);
        // closing "/" — also pick up any trailing g/m/i flags
        else if (stream.look() == "/") {
            regex += stream.next();
            while (/[gmi]/.test(stream.look())) {
                regex += stream.next();
            tokens.push(new Token(regex, "REGX", "REGX", this.line));
        // any other character belongs to the regex body
        regex += stream.next();
    // error: unterminated regex