1 //<script type="text/javascript">
6 //const Token = imports.Token.Token;
7 //const Lang = imports.Lang.Lang;
10 @class Searches a {@link JSDOC.TextStream} for language tokens.
// TokenArray: a thin wrapper around Gee.ArrayList<Token> that adds the
// stack-like helpers (push/pop/last/lastSym) the tokenizer needs while
// it back-checks previously emitted tokens.
15 public class TokenArray: Object {
// backing store for all tokens, in the order they were read
17 public Gee.ArrayList<Token> tokens;
// property getter (declaration not visible here): number of tokens collected
19 get { return this.tokens.size; }
// constructor: start with an empty token list
24 this.tokens = new Gee.ArrayList<Token>();
// Returns the most recently pushed token; null branch is outside this view.
27 public Token? last() {
28 if (this.tokens.size > 0) {
29 return this.tokens.get(this.tokens.size-1);
// Returns the last token that is neither whitespace ("WHIT") nor a
// comment ("COMM") — i.e. the last syntactically significant token.
33 public Token? lastSym () {
34 for (var i = this.tokens.size-1; i >= 0; i--) {
35 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM"))) {
36 return this.tokens.get(i);
// Appends a token to the end of the list.
41 public void push (Token t) {
// pop (method header not visible): removes and returns the final token.
// NOTE(review): Gee's remove_at() returns the removed element, so the
// `return` below yields the popped Token — confirm against the Gee version in use.
46 if (this.tokens.size > 0) {
47 return this.tokens.remove_at(this.tokens.size-1);
// Index-based access; `new` deliberately hides GLib.Object.get.
52 public new Token get(int i) {
53 return this.tokens.get(i);
// dump helper: prints one token per line (debugging aid only).
57 foreach(var token in this.tokens) {
58 print(token.asString() +"\n");
64 public errordomain TokenReader_Error {
// TokenReader: splits a TextStream of JavaScript source into Tokens.
// All public bool/string fields below are configuration flags intended
// to be set at construction time.
69 public class TokenReader : Object
76 * I wonder if this will accept the prop: value, prop2 :value constructor if we do not define one...
79 /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
80 public bool collapseWhite = false; // only reduces white space...
81 /** @cfg {Boolean} keepDocs keep JSDOC comments **/
82 public bool keepDocs = true;
83 /** @cfg {Boolean} keepWhite keep White space **/
84 public bool keepWhite = false;
85 /** @cfg {Boolean} keepComments keep all comments **/
86 public bool keepComments = false;
87 /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
88 public bool sepIdents = false;
89 /** @cfg {String} filename name of file being parsed. **/
90 public string filename = "";
91 /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
92 public bool ignoreBadGrammer = false;
99 * @return {Array} of tokens
101 * ts = new TextStream(File.read(str));
102 * tr = TokenReader({ keepComments : true, keepWhite : true });
// Main entry point: repeatedly tries each token reader, in priority
// order, until the stream is exhausted. Each read_* method consumes
// input and returns true only when it matched at the current position.
106 public TokenArray tokenize(TextStream stream)
109 var tokens = new TokenArray();
112 while (!stream.lookEOF()) {
// order matters: comments before strings/regex, regex before punctuation
115 if (this.read_mlcomment(stream, tokens)) continue;
116 if (this.read_slcomment(stream, tokens)) continue;
117 if (this.read_dbquote(stream, tokens)) continue;
118 if (this.read_snquote(stream, tokens)) continue;
119 if (this.read_regx(stream, tokens)) continue;
120 if (this.read_numb(stream, tokens)) continue;
121 if (this.read_punc(stream, tokens)) continue;
122 if (this.read_newline(stream, tokens)) continue;
123 if (this.read_space(stream, tokens)) continue;
124 if (this.read_word(stream, tokens)) continue;
126 // if execution reaches here then an error has happened
// fallback: emit the unrecognised character as an UNKNOWN_TOKEN so
// tokenizing can continue (surrounding statement not visible here)
128 new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
138 * findPuncToken - find the id of a token (previous to current)
139 * need to back check syntax..
141 * @arg {Array} tokens the array of tokens.
142 * @arg {String} token data (eg. '(')
143 * @arg {Number} offset where to start reading from
144 * @return {Number} position of token
// Scans BACKWARDS from position n for a token whose data equals `data`,
// skipping over balanced ()/{} pairs via a depth counter (`stack`,
// declared on a line not visible here).
146 public int findPuncToken(TokenArray tokens, string data, int n)
// n <= 0 means "start from the last token"
148 n = n > 0 ? n : tokens.length -1;
// match only when not inside a nested bracket/brace group
152 if (stack < 1 && tokens.get(n).data == data) {
// closing bracket seen while scanning backwards => entering a nested group
156 if (tokens.get(n).data == ")" || tokens.get(n).data == "}") {
// opening bracket closes a nested group (stack decrement is on a hidden line)
161 if (stack > 0 && (tokens.get(n).data == "{" || tokens.get(n).data == "(")) {
173 * lastSym - find the last token symbol
174 * need to back check syntax..
176 * @arg {Array} tokens the array of tokens.
177 * @arg {Number} offset where to start..
178 * @return {Token} the token
// Like TokenArray.lastSym(), but starts at an arbitrary offset n:
// walks backwards from n-1 skipping whitespace and comment tokens.
180 public Token? lastSym(TokenArray tokens, int n)
182 for (var i = n-1; i >= 0; i--) {
183 if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
184 return tokens.get(i);
193 @returns {Boolean} Was the token found?
// Reads a run of word characters and emits either a KEYW token, a single
// NAME token, or (when sepIdents is set) a dotted identifier split into
// NAME/DOT tokens. Also grammar-checks `return` appearing after ')'.
195 public bool read_word (TextStream stream, TokenArray tokens)
198 while (!stream.lookEOF() && Lang.isWordChar(stream.look().to_string())) {
199 found += stream.next();
// null when `found` is not a reserved word
206 var name = Lang.keyword(found);
209 // look for "()return" ?? why ???
210 var ls = tokens.lastSym();
211 if (found == "return" && ls != null && ls.data == ")") {
212 //Seed.print('@' + tokens.length);
// locate the matching '(' for the ')' that precedes `return`
213 var n = this.findPuncToken(tokens, ")", 0);
214 //Seed.print(')@' + n);
215 n = this.findPuncToken(tokens, "(", n-1);
216 //Seed.print('(@' + n);
218 //var lt = this.lastSym(tokens, n);
// NOTE(review): the only visible declaration of `lt` (line 218 above) is
// commented out — the live declaration must be on a line missing from
// this view; confirm it exists before touching this code.
220 //print(JSON.stringify(lt));
// BUG(review): `indexOf(...) < -1` can never be true — indexOf returns
// -1 or a valid index, so the right-hand side is dead. Almost certainly
// `< 0` (i.e. "not IF/WHILE") was intended. Also `[...].indexOf` is
// JS-style; verify how this compiles in the real (un-sampled) file.
221 if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
222 if (!this.ignoreBadGrammer) {
223 throw new TokenReader_Error.ArgumentError(
224 this.filename + ":" + this.line + " Error - return found after )"
// reserved word => keyword token
233 tokens.push(new Token(found, "KEYW", name, this.line));
// plain identifier, or sepIdents disabled: emit as one NAME token
237 if (!this.sepIdents || found.index_of(".") < 0 ) {
238 tokens.push(new Token(found, "NAME", "NAME", this.line));
// sepIdents: split "a.b.c" into NAME '.' NAME '.' NAME
241 var n = found.split(".");
243 foreach (unowned string nm in n) {
245 tokens.push(new Token(".", "PUNC", "DOT", this.line));
248 tokens.push(new Token(nm, "NAME", "NAME", this.line));
256 @returns {Boolean} Was the token found?
// Reads the longest punctuation sequence Lang.punc() recognises, then
// grammar-checks for a trailing comma before '}' or ']' (throws unless
// ignoreBadGrammer is set).
258 public bool read_punc (TextStream stream, TokenArray tokens) throws TokenReader_Error
// greedy match: keep extending while (found + next char) is still valid punctuation
262 while (!stream.lookEOF()) {
263 var ns = stream.look().to_string();
265 if (null == Lang.punc(found + ns )) {
268 found += stream.next();
276 var ls = tokens.lastSym();
// trailing comma before a closing brace/bracket breaks older JS engines
278 if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
279 //print("Error - comma found before " + found);
280 //print(JSON.stringify(tokens.lastSym(), null,4));
281 if (this.ignoreBadGrammer) {
// warn only — tolerated when ignoreBadGrammer is on
282 print("\n" + this.filename + ":" + this.line.to_string() + " Error - comma found before " + found);
284 throw new TokenReader_Error.ArgumentError(
285 this.filename + ":" + this.line.to_string() + " comma found before " + found
// Lang.punc(found) supplies the token name (e.g. DOT, LEFT_PAREN)
292 tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
298 @returns {Boolean} Was the token found?
// Reads a run of non-newline whitespace; emits a WHIT/SPACE token only
// when keepWhite is set, collapsed to a single " " when collapseWhite is set.
300 public bool read_space (TextStream stream, TokenArray tokens)
// newlines are handled separately by read_newline()
304 while (!stream.lookEOF() && Lang.isSpaceC( stream.look()) && !Lang.isNewlineC(stream.look())) {
305 found += stream.next();
311 //print("WHITE = " + JSON.stringify(found));
314 if (this.collapseWhite) {
315 found = " "; // this might work better if it was a '\n' ???
317 if (this.keepWhite) {
318 tokens.push(new Token(found, "WHIT", "SPACE", this.line));
325 @returns {Boolean} Was the token found?
// Reads a run of newline characters; emits a WHIT/NEWLINE token
// (collapsed to a single "\n" under collapseWhite) when keepWhite is set.
327 public bool read_newline (TextStream stream, TokenArray tokens)
// remember the line the run started on — `this.line` advances as we consume
330 var line = this.line;
331 while (!stream.lookEOF() && Lang.isNewlineC(stream.look())) {
333 found += stream.next();
340 // if we found a new line, then we could check if previous character was a ';' - if so we can drop it.
341 // otherwise generally keep it.. in which case it should reduce our issue with stripping new lines..
345 if (this.collapseWhite) {
346 found = "\n"; // reduces multiple line breaks into a single one...
349 if (this.keepWhite) {
// merge with a preceding whitespace token: pop the last token and
// replace it with the NEWLINE below
350 var last = tokens.pop();
// NOTE(review): when the popped token is NOT whitespace it presumably
// must be pushed back inside this branch — that push is on a line
// missing from this view; confirm it exists, otherwise pop() drops
// a real token here.
351 if (last != null && last.name != "WHIT") {
354 // replaces last new line...
355 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
361 @returns {Boolean} Was the token found?
// Reads a /* ... */ comment. Emits COMM/JSDOC for doc comments when
// keepDocs is set, COMM/MULTI_LINE_COMM when keepComments is set,
// otherwise consumes the comment silently.
363 public bool read_mlcomment (TextStream stream, TokenArray tokens)
// must start with "/*"
365 if (stream.look() != '/') {
368 if (stream.look(1) != '*') {
371 var found = stream.next(2);
// remember starting line for the MULTI_LINE_COMM token
373 var line = this.line;
// consume until the two most recently consumed chars are "*/"
// (look(-1)/look(-2) peek backwards at already-consumed input)
374 while (!stream.lookEOF() && !(stream.look(-1) == '/' && stream.look(-2) == '*')) {
382 // to start doclet we allow /** or /*** but not /**/ or /****
383 //if (found.length /^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) {
384 if (this.keepDocs && found.length > 4 && found.index_of("/**") == 0 && found[3] != '/') {
385 tokens.push(new Token(found, "COMM", "JSDOC", this.line));
386 } else if (this.keepComments) {
387 tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
394 @returns {Boolean} Was the token found?
// Reads a single-line comment: either "//..." or the legacy HTML
// comment opener "<!--...". Emits COMM/SINGLE_LINE_COMM if keepComments.
396 public bool read_slcomment (TextStream stream, TokenArray tokens)
// "//" opener (assignment inside the condition captures the consumed chars)
400 (stream.look() == '/' && stream.look(1) == '/' && (""!=(found=stream.next(2))))
// "<!--" opener, treated like a single-line comment in old JS
402 (stream.look() == '<' && stream.look(1) == '!' && stream.look(2) == '-' && stream.look(3) == '-' && (""!=(found=stream.next(4))))
404 var line = this.line;
// consume up to (but see below) the end of line
405 while (!stream.lookEOF()) {
406 //print(stream.look().to_string());
407 if ( Lang.isNewline(stream.look().to_string())) {
410 found += stream.next();
412 if (!stream.lookEOF()) { // looking for end of line... if we got it, then do not eat the character..
413 found += stream.next();
415 if (this.keepComments) {
416 tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
425 @returns {Boolean} Was the token found?
// Reads a double-quoted string literal (including the quotes) and emits
// a STRN/DOUBLE_QUOTE token. Handles backslash escapes and
// backslash-newline line continuations.
427 public bool read_dbquote (TextStream stream, TokenArray tokens)
429 if (stream.look() != '"') {
// consume the opening quote
433 var str = stream.next();
435 while (!stream.lookEOF()) {
436 if (stream.look() == '\\') {
// backslash-newline: a line continuation — skip the newline run
// (loop head is on a line missing from this view)
437 if (Lang.isNewline(stream.look(1).to_string())) {
440 } while (!stream.lookEOF() && Lang.isNewline(stream.look().to_string()));
// ordinary escape: keep backslash + escaped char verbatim
444 str += stream.next(2);
// closing quote terminates the literal
448 if (stream.look() == '"') {
449 str += stream.next();
450 tokens.push(new Token(str, "STRN", "DOUBLE_QUOTE", this.line));
// any other character is part of the string body
454 str += stream.next();
461 @returns {Boolean} Was the token found?
// Reads a single-quoted string literal (including the quotes) and emits
// a STRN/SINGLE_QUOTE token. Simpler than read_dbquote: escapes are
// copied verbatim, no line-continuation handling visible here.
463 public bool read_snquote (TextStream stream, TokenArray tokens)
465 if (stream.look() != '\'') {
// consume the opening quote
469 var str = stream.next();
471 while (!stream.lookEOF()) {
472 if (stream.look() == '\\') { // escape sequence
473 str += stream.next(2);
// closing quote terminates the literal
476 if (stream.look() == '\'') {
477 str += stream.next();
478 tokens.push(new Token(str, "STRN", "SINGLE_QUOTE", this.line));
// any other character is part of the string body
481 str += stream.next();
489 @returns {Boolean} Was the token found?
// Reads a numeric literal: delegates "0x..." to read_hex, otherwise
// greedily consumes while Lang.isNumber() accepts the accumulated text,
// then classifies a leading-zero octal vs. a decimal.
491 public bool read_numb (TextStream stream, TokenArray tokens)
493 if (stream.look() == '0' && stream.look(1) == 'x') {
494 return this.read_hex(stream, tokens);
// extend only while the WHOLE accumulated string still parses as a number
499 while (!stream.lookEOF() && Lang.isNumber(found+stream.look().to_string())){
500 found += stream.next();
// "0" followed by an octal digit => OCTAL (JS legacy octal notation)
506 if (GLib.Regex.match_simple("^0[0-7]", found)) {
507 tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
510 tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
516 @returns {Boolean} Was the token found?
// Reads a hexadecimal literal; caller (read_numb) has already verified
// the stream starts with "0x". Emits a NUMB/HEX_DEC token.
518 public bool read_hex (TextStream stream, TokenArray tokens)
// consume the "0x" prefix
520 var found = stream.next(2);
522 while (!stream.lookEOF()) {
// stop when the literal is valid so far but the next char would break it
523 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look().to_string())) { // done
524 tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
528 found += stream.next();
535 @returns {Boolean} Was the token found?
// Reads a regex literal /pattern/flags. Because '/' is ambiguous with
// division, it first inspects the previous significant token: a regex
// can only follow tokens where an expression is expected.
537 public bool read_regx (TextStream stream, TokenArray tokens)
540 if (stream.look() != '/') {
543 var last = tokens.lastSym();
// a '/' after a number, ')' or ']' is division, not a regex start
// (enclosing condition continues on lines missing from this view)
548 !last.is("NUMB") // stuff that can not appear before a regex..
550 && !last.is("RIGHT_PAREN")
551 && !last.is("RIGHT_BRACKET")
// consume the opening '/'
554 var regex = stream.next();
556 while (!stream.lookEOF()) {
557 if (stream.look() == '\\') { // escape sequence
558 regex += stream.next(2);
// unescaped '/' closes the pattern; then consume trailing g/m/i flags
561 if (stream.look() == '/') {
562 regex += stream.next();
564 while (GLib.Regex.match_simple("[gmi]", stream.look().to_string())) {
565 regex += stream.next();
568 tokens.push(new Token(regex, "REGX", "REGX", this.line));
// any other character is part of the pattern
572 regex += stream.next();
575 // error: unterminated regex