1 //<script type="text/javascript">
// --- Inline test scaffold (fragment): exercises the TokenReader below by
// tokenizing a sample source file. Appears to be a leftover from the
// JavaScript original of this file — TODO confirm whether it is dead code.
7 var lc = new JSDOC.Lang_Class ();
8 var tr = new JSDOC.TokenReader();
// Keep comments and all whitespace so the token stream round-trips the input.
11 tr.keepComments = true;
13 tr.collapseWhite = false;
// NOTE(review): hard-coded absolute path — only works on the original author's machine.
16 FileUtils.get_contents("/home/alan/gitlive/gnome.introspection-doc-generator/JSDOC/Walker2.js", out str);
18 var toks = tr.tokenize(new JSDOC.TextStream(str)); // don't merge xxx + . + yyyy etc.
22 //const Token = imports.Token.Token;
23 //const Lang = imports.Lang.Lang;
26 @class Search a {@link JSDOC.TextStream} for language tokens.
// Ordered collection of Tokens produced by the TokenReader, with
// stack-like helpers (push/pop/last) used while scanning.
34 public class TokenArray: Object {
// Backing store for the tokens, in the order they were read.
36 public Gee.ArrayList<Token> tokens;
// Property getter fragment: exposes the number of stored tokens.
38 get { return this.tokens.size; }
// Constructor fragment: start with an empty token list.
43 this.tokens = new Gee.ArrayList<Token>();
// Returns the most recently added token, or null when the array is empty.
46 public Token? last() {
47 if (this.tokens.size > 0) {
48 return this.tokens.get(this.tokens.size-1);
// Returns the last token that is neither whitespace ("WHIT") nor a
// comment ("COMM"), scanning backwards; null if none exists.
52 public Token? lastSym () {
53 for (var i = this.tokens.size-1; i >= 0; i--) {
54 if (!(this.tokens.get(i).is("WHIT") || this.tokens.get(i).is("COMM"))) {
55 return this.tokens.get(i);
// Appends a token to the end of the list.
60 public void push (Token t) {
// Pop fragment: removes (and presumably returns) the final token when one
// exists — TODO confirm the full signature against the complete file.
65 if (this.tokens.size > 0) {
66 return this.tokens.remove_at(this.tokens.size-1);
// Indexed access; `new` hides GLib.Object.get.
71 public new Token get(int i) {
72 return this.tokens.get(i);
// Debug dump fragment: prints each token on its own line.
76 foreach(var token in this.tokens) {
77 print(token.asString() +"\n");
// Error domain thrown by TokenReader when the scanned source contains
// grammar the reader refuses (see ArgumentError uses below).
83 public errordomain TokenReader_Error {
// Reads a TextStream of JavaScript source and turns it into a TokenArray.
// All the public bool fields below are configuration flags documented in
// JSDOC @cfg style, matching the rest of this file.
88 public class TokenReader : Object
95 * I wonder if this will accept the prop: value, prop2 :value constructor if we do not define one...
98 /** @cfg {Boolean} collapseWhite merge multiple whitespace/comments into a single token **/
99 public bool collapseWhite = false; // only reduces white space...
100 /** @cfg {Boolean} keepDocs keep JSDOC comments **/
101 public bool keepDocs = true;
102 /** @cfg {Boolean} keepWhite keep White space **/
103 public bool keepWhite = false;
104 /** @cfg {Boolean} keepComments keep all comments **/
105 public bool keepComments = false;
106 /** @cfg {Boolean} sepIdents separate identifiers (eg. a.b.c into ['a', '.', 'b', '.', 'c'] ) **/
107 public bool sepIdents = false;
108 /** @cfg {String} filename name of file being parsed. **/
109 public string filename = "";
110 /** @cfg {Boolean} ignoreBadGrammer do not throw errors if we find stuff that might break compression **/
111 public bool ignoreBadGrammer = false;
118 * @return {Array} of tokens
120 * ts = new TextStream(File.read(str));
121 * tr = TokenReader({ keepComments : true, keepWhite : true });
// Main entry point: loops until EOF, letting each specialised read_*
// scanner try to consume the next token. Order matters — comments are
// tried before punctuation/regex so "/*" is not misread as division.
125 public TokenArray tokenize(TextStream stream)
128 var tokens = new TokenArray();
131 while (!stream.lookEOF()) {
134 if (this.read_mlcomment(stream, tokens)) continue;
135 if (this.read_slcomment(stream, tokens)) continue;
136 if (this.read_dbquote(stream, tokens)) continue;
137 if (this.read_snquote(stream, tokens)) continue;
138 if (this.read_regx(stream, tokens)) continue;
139 if (this.read_numb(stream, tokens)) continue;
140 if (this.read_punc(stream, tokens)) continue;
141 if (this.read_newline(stream, tokens)) continue;
142 if (this.read_space(stream, tokens)) continue;
143 if (this.read_word(stream, tokens)) continue;
145 // if execution reaches here then an error has happened
// Fallback: consume one character and record it as an UNKNOWN_TOKEN so
// the loop always makes progress.
147 new Token(stream.next(), "TOKN", "UNKNOWN_TOKEN", this.line)
157 * findPuncToken - find the id of a token (previous to current)
158 * need to back check syntax..
160 * @arg {Array} tokens the array of tokens.
161 * @arg {String} token data (eg. '(')
162 * @arg {Number} offset where to start reading from
163 * @return {Number} position of token
// Walks backwards from position n (or from the end when n <= 0) looking
// for the token whose data matches `data`, tracking a bracket `stack`
// depth so matches inside nested ()/{} pairs are skipped.
165 public int findPuncToken(TokenArray tokens, string data, int n)
167 n = n > 0 ? n : tokens.length -1;
// Match only when we are at the same nesting depth we started at.
171 if (stack < 1 && tokens.get(n).data == data) {
// A closing bracket means we are entering a nested group (scanning backwards).
175 if (tokens.get(n).data == ")" || tokens.get(n).data == "}") {
// An opening bracket closes a nested group that was entered above.
180 if (stack > 0 && (tokens.get(n).data == "{" || tokens.get(n).data == "(")) {
192 * lastSym - find the last token symbol
193 * need to back check syntax..
195 * @arg {Array} tokens the array of tokens.
196 * @arg {Number} offset where to start..
197 * @return {Token} the token
// Like TokenArray.lastSym() but starts from an arbitrary offset n:
// returns the nearest preceding token that is not whitespace or a
// comment, or null when none is found.
199 public Token? lastSym(TokenArray tokens, int n)
201 for (var i = n-1; i >= 0; i--) {
202 if (!(tokens.get(i).is("WHIT") || tokens.get(i).is("COMM"))) {
203 return tokens.get(i);
212 @returns {Boolean} Was the token found?
// Reads a run of word characters; classifies it as a KEYW token when it
// matches a language keyword, otherwise as NAME token(s). With sepIdents
// set, dotted identifiers are split into NAME/DOT/NAME... tokens.
214 public bool read_word (TextStream stream, TokenArray tokens)
217 while (!stream.lookEOF() && Lang.isWordChar(stream.look().to_string())) {
218 found += stream.next();
// Non-null when `found` is a reserved word.
225 var name = Lang.keyword(found);
228 // look for "()return" ?? why ???
229 var ls = tokens.lastSym();
// Grammar check: `return` directly after ")" is suspicious unless the
// ")" closes an if/while condition.
230 if (found == "return" && ls != null && ls.data == ")") {
231 //Seed.print('@' + tokens.length);
232 var n = this.findPuncToken(tokens, ")", 0);
233 //Seed.print(')@' + n);
234 n = this.findPuncToken(tokens, "(", n-1);
235 //Seed.print('(@' + n);
237 //var lt = this.lastSym(tokens, n);
239 //print(JSON.stringify(lt));
// NOTE(review): `indexOf(...) < -1` can never be true (indexOf returns
// >= -1), so this condition reduces to `lt.type != "KEYW"` — probably
// meant `< 0`. Also `lt` is only declared in the commented-out line
// above; cannot fix from this excerpt — verify against the full file.
240 if (lt.type != "KEYW" || ["IF", 'WHILE'].indexOf(lt.name) < -1) {
241 if (!this.ignoreBadGrammer) {
242 throw new TokenReader_Error.ArgumentError(
243 this.filename + ":" + this.line + " Error - return found after )"
// Keyword path: emit a single KEYW token.
252 tokens.push(new Token(found, "KEYW", name, this.line));
// Plain identifier, or dotted identifier with splitting disabled.
256 if (!this.sepIdents || found.index_of(".") < 0 ) {
257 tokens.push(new Token(found, "NAME", "NAME", this.line));
// Split "a.b.c" into NAME, DOT, NAME, DOT, NAME tokens.
260 var n = found.split(".");
262 foreach (unowned string nm in n) {
264 tokens.push(new Token(".", "PUNC", "DOT", this.line));
267 tokens.push(new Token(nm, "NAME", "NAME", this.line));
275 @returns {Boolean} Was the token found?
// Greedily reads the longest string of characters that Lang.punc()
// recognises as a punctuation token. Throws (unless ignoreBadGrammer)
// when a trailing comma precedes "}" or "]".
277 public bool read_punc (TextStream stream, TokenArray tokens) throws TokenReader_Error
281 while (!stream.lookEOF()) {
282 var ns = stream.look().to_string();
// Stop as soon as extending `found` would no longer be valid punctuation.
284 if (null == Lang.punc(found + ns )) {
287 found += stream.next();
295 var ls = tokens.lastSym();
// Trailing-comma check: ",}" and ",]" break older JS engines/compressors.
297 if ((found == "}" || found == "]") && ls != null && ls.data == ",") {
298 //print("Error - comma found before " + found);
299 //print(JSON.stringify(tokens.lastSym(), null,4));
300 if (this.ignoreBadGrammer) {
// Lenient mode: report the problem but keep tokenizing.
301 print("\n" + this.filename + ":" + this.line.to_string() + " Error - comma found before " + found);
303 throw new TokenReader_Error.ArgumentError(
304 this.filename + ":" + this.line.to_string() + " comma found before " + found
311 tokens.push(new Token(found, "PUNC", Lang.punc(found), this.line));
317 @returns {Boolean} Was the token found?
// Reads a run of non-newline whitespace. Collapses it to a single space
// when collapseWhite is set, and only emits a token when keepWhite is set.
319 public bool read_space (TextStream stream, TokenArray tokens)
323 while (!stream.lookEOF() && Lang.isSpaceC( stream.look()) && !Lang.isNewlineC(stream.look())) {
324 found += stream.next();
330 //print("WHITE = " + JSON.stringify(found));
333 if (this.collapseWhite) {
334 found = " "; // this might work better if it was a '\n' ???
336 if (this.keepWhite) {
337 tokens.push(new Token(found, "WHIT", "SPACE", this.line));
344 @returns {Boolean} Was the token found?
// Reads a run of newline characters. With collapseWhite, multiple blank
// lines become one "\n"; with keepWhite, a NEWLINE token replaces any
// immediately preceding WHIT token so whitespace is not doubled up.
346 public bool read_newline (TextStream stream, TokenArray tokens)
// Remember the line the newline run started on for the emitted token.
349 var line = this.line;
350 while (!stream.lookEOF() && Lang.isNewlineC(stream.look())) {
352 found += stream.next();
359 // if we found a new line, then we could check if previous character was a ';' - if so we can drop it.
360 // otherwise generally keep it.. in which case it should reduce our issue with stripping new lines..
364 if (this.collapseWhite) {
365 found = "\n"; // reduces multiple line breaks into a single one...
368 if (this.keepWhite) {
// Pop the last token; if it was not whitespace it is presumably pushed
// back before the NEWLINE — TODO confirm against the missing lines.
369 var last = tokens.pop();
370 if (last != null && last.name != "WHIT") {
373 // replaces last new line...
374 tokens.push(new Token(found, "WHIT", "NEWLINE", line));
380 @returns {Boolean} Was the token found?
// Reads a "/* ... */" comment. Comments beginning "/**" (but not "/**/"
// or "/****") become JSDOC tokens when keepDocs is set; other comments
// become MULTI_LINE_COMM tokens when keepComments is set.
382 public bool read_mlcomment (TextStream stream, TokenArray tokens)
384 if (stream.look() != '/') {
387 if (stream.look(1) != '*') {
390 var found = stream.next(2);
// Remember the line the comment started on.
392 var line = this.line;
// Consume until the previous two characters are "*/".
393 while (!stream.lookEOF() && !(stream.look(-1) == '/' && stream.look(-2) == '*')) {
401 // to start doclet we allow /** or /*** but not /**/ or /****
402 //if (found.length /^\/\*\*([^\/]|\*[^*])/.test(found) && this.keepDocs) {
403 if (this.keepDocs && found.length > 4 && found.index_of("/**") == 0 && found[3] != '/') {
// NOTE(review): JSDOC uses this.line (the comment's END line) while the
// branch below uses the saved start `line` — verify this asymmetry is intended.
404 tokens.push(new Token(found, "COMM", "JSDOC", this.line));
405 } else if (this.keepComments) {
406 tokens.push(new Token(found, "COMM", "MULTI_LINE_COMM", line));
413 @returns {Boolean} Was the token found?
// Reads a single-line comment: either "//..." or an HTML-style "<!--..."
// opener, consuming up to and including the terminating newline.
415 public bool read_slcomment (TextStream stream, TokenArray tokens)
419 (stream.look() == '/' && stream.look(1) == '/' && (""!=(found=stream.next(2))))
421 (stream.look() == '<' && stream.look(1) == '!' && stream.look(2) == '-' && stream.look(3) == '-' && (""!=(found=stream.next(4))))
423 var line = this.line;
424 while (!stream.lookEOF()) {
425 //print(stream.look().to_string());
// Stop accumulating at the end of the line.
426 if ( Lang.isNewline(stream.look().to_string())) {
429 found += stream.next();
431 if (!stream.lookEOF()) { // lookinng for end of line... if we got it, then do not eat the character..
432 found += stream.next();
434 if (this.keepComments) {
435 tokens.push(new Token(found, "COMM", "SINGLE_LINE_COMM", line));
444 @returns {Boolean} Was the token found?
// Reads a double-quoted string literal, honouring backslash escapes and
// line continuations (backslash followed by newline(s)). Emits a STRN
// token of subtype DOUBLE_QUOTE including the surrounding quotes.
446 public bool read_dbquote (TextStream stream, TokenArray tokens)
448 if (stream.look() != '"') {
// Consume the opening quote.
452 var str = stream.next();
454 while (!stream.lookEOF()) {
455 if (stream.look() == '\\') {
// Backslash-newline: a line continuation, skip the newline run.
456 if (Lang.isNewline(stream.look(1).to_string())) {
459 } while (!stream.lookEOF() && Lang.isNewline(stream.look().to_string()));
// Ordinary escape sequence: take the backslash and the escaped char.
463 str += stream.next(2);
// Closing quote terminates the literal.
467 if (stream.look() == '"') {
468 str += stream.next();
469 tokens.push(new Token(str, "STRN", "DOUBLE_QUOTE", this.line));
473 str += stream.next();
480 @returns {Boolean} Was the token found?
// Reads a single-quoted string literal with backslash escapes. Unlike
// read_dbquote, there is no special line-continuation handling here.
482 public bool read_snquote (TextStream stream, TokenArray tokens)
484 if (stream.look() != '\'') {
// Consume the opening quote.
488 var str = stream.next();
490 while (!stream.lookEOF()) {
491 if (stream.look() == '\\') { // escape sequence
492 str += stream.next(2);
// Closing quote terminates the literal.
495 if (stream.look() == '\'') {
496 str += stream.next();
497 tokens.push(new Token(str, "STRN", "SINGLE_QUOTE", this.line));
500 str += stream.next();
508 @returns {Boolean} Was the token found?
// Reads a numeric literal. "0x..." is delegated to read_hex; otherwise
// characters are accumulated while the prefix still parses as a number,
// then classified as OCTAL (leading 0 + octal digit) or DECIMAL.
510 public bool read_numb (TextStream stream, TokenArray tokens)
512 if (stream.look() == '0' && stream.look(1) == 'x') {
513 return this.read_hex(stream, tokens);
518 while (!stream.lookEOF() && Lang.isNumber(found+stream.look().to_string())){
519 found += stream.next();
// Leading zero followed by an octal digit means an octal literal.
525 if (GLib.Regex.match_simple("^0[0-7]", found)) {
526 tokens.push(new Token(found, "NUMB", "OCTAL", this.line));
529 tokens.push(new Token(found, "NUMB", "DECIMAL", this.line));
535 @returns {Boolean} Was the token found?
// Reads a hexadecimal literal ("0x" already verified by the caller):
// consumes characters while the string remains a valid hex literal and
// emits a NUMB/HEX_DEC token at the first character that would break it.
537 public bool read_hex (TextStream stream, TokenArray tokens)
// Consume the "0x" prefix.
539 var found = stream.next(2);
541 while (!stream.lookEOF()) {
542 if (Lang.isHexDec(found) && !Lang.isHexDec(found+stream.look().to_string())) { // done
543 tokens.push(new Token(found, "NUMB", "HEX_DEC", this.line));
547 found += stream.next();
554 @returns {Boolean} Was the token found?
// Reads a regex literal "/.../flags". Disambiguates from division by
// inspecting the previous symbol: a regex cannot directly follow a
// number, ")" or "]" (those imply the "/" is the division operator).
556 public bool read_regx (TextStream stream, TokenArray tokens)
559 if (stream.look() != '/') {
562 var last = tokens.lastSym();
567 !last.is("NUMB") // stuff that can not appear before a regex..
569 && !last.is("RIGHT_PAREN")
570 && !last.is("RIGHT_BRACKET")
// Consume the opening "/".
573 var regex = stream.next();
575 while (!stream.lookEOF()) {
576 if (stream.look() == '\\') { // escape sequence
577 regex += stream.next(2);
// Closing "/" — then pick up any trailing g/m/i flags.
580 if (stream.look() == '/') {
581 regex += stream.next();
583 while (GLib.Regex.match_simple("[gmi]", stream.look().to_string())) {
584 regex += stream.next();
587 tokens.push(new Token(regex, "REGX", "REGX", this.line));
591 regex += stream.next();
594 // error: unterminated regex