From 0f0db8f3a92e1a1d7d09621c60f0345088bebb6c Mon Sep 17 00:00:00 2001 From: David Anthony Bau Date: Wed, 20 Jul 2016 21:05:14 -0400 Subject: [PATCH] Add automatic graph generation tool --- tools/grammar-analysis/ANTLRv4Lexer.py | 624 +++ tools/grammar-analysis/ANTLRv4Parser.py | 4765 +++++++++++++++++ .../grammar-analysis/ANTLRv4ParserListener.py | 573 ++ tools/grammar-analysis/LexBasic.py | 175 + tools/grammar-analysis/LexerAdaptor.py | 66 + tools/grammar-analysis/README.md | 6 + tools/grammar-analysis/generate.py | 257 + .../grammar-analysis/grammar/ANTLRv4Lexer.g4 | 347 ++ .../grammar/ANTLRv4Lexer.tokens | 78 + .../grammar-analysis/grammar/ANTLRv4Parser.g4 | 379 ++ .../grammar/ANTLRv4Parser.tokens | 78 + tools/grammar-analysis/grammar/LexBasic.g4 | 302 ++ .../grammar-analysis/grammar/LexBasic.tokens | 0 tools/grammar-analysis/grammar/three.g4 | 3 + tools/grammar-analysis/pom.xml | 62 + 15 files changed, 7715 insertions(+) create mode 100644 tools/grammar-analysis/ANTLRv4Lexer.py create mode 100644 tools/grammar-analysis/ANTLRv4Parser.py create mode 100644 tools/grammar-analysis/ANTLRv4ParserListener.py create mode 100644 tools/grammar-analysis/LexBasic.py create mode 100644 tools/grammar-analysis/LexerAdaptor.py create mode 100644 tools/grammar-analysis/README.md create mode 100644 tools/grammar-analysis/generate.py create mode 100644 tools/grammar-analysis/grammar/ANTLRv4Lexer.g4 create mode 100644 tools/grammar-analysis/grammar/ANTLRv4Lexer.tokens create mode 100644 tools/grammar-analysis/grammar/ANTLRv4Parser.g4 create mode 100644 tools/grammar-analysis/grammar/ANTLRv4Parser.tokens create mode 100644 tools/grammar-analysis/grammar/LexBasic.g4 create mode 100644 tools/grammar-analysis/grammar/LexBasic.tokens create mode 100644 tools/grammar-analysis/grammar/three.g4 create mode 100644 tools/grammar-analysis/pom.xml diff --git a/tools/grammar-analysis/ANTLRv4Lexer.py b/tools/grammar-analysis/ANTLRv4Lexer.py new file mode 100644 index 00000000..53783b77 --- 
/dev/null +++ b/tools/grammar-analysis/ANTLRv4Lexer.py @@ -0,0 +1,624 @@ +# Generated from java-escape by ANTLR 4.5 +from antlr4 import * +from io import StringIO + + +from LexerAdaptor import LexerAdaptor + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2?") + buf.write("\u03c6\b\1\b\1\b\1\b\1\b\1\b\1\b\1\4\2\t\2\4\3\t\3\4\4") + buf.write("\t\4\4\5\t\5\4\6\t\6\4\7\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4") + buf.write("\13\t\13\4\f\t\f\4\r\t\r\4\16\t\16\4\17\t\17\4\20\t\20") + buf.write("\4\21\t\21\4\22\t\22\4\23\t\23\4\24\t\24\4\25\t\25\4\26") + buf.write("\t\26\4\27\t\27\4\30\t\30\4\31\t\31\4\32\t\32\4\33\t\33") + buf.write("\4\34\t\34\4\35\t\35\4\36\t\36\4\37\t\37\4 \t \4!\t!\4") + buf.write("\"\t\"\4#\t#\4$\t$\4%\t%\4&\t&\4\'\t\'\4(\t(\4)\t)\4*") + buf.write("\t*\4+\t+\4,\t,\4-\t-\4.\t.\4/\t/\4\60\t\60\4\61\t\61") + buf.write("\4\62\t\62\4\63\t\63\4\64\t\64\4\65\t\65\4\66\t\66\4\67") + buf.write("\t\67\48\t8\49\t9\4:\t:\4;\t;\4<\t<\4=\t=\4>\t>\4?\t?") + buf.write("\4@\t@\4A\tA\4B\tB\4C\tC\4D\tD\4E\tE\4F\tF\4G\tG\4H\t") + buf.write("H\4I\tI\4J\tJ\4K\tK\4L\tL\4M\tM\4N\tN\4O\tO\4P\tP\4Q\t") + buf.write("Q\4R\tR\4S\tS\4T\tT\4U\tU\4V\tV\4W\tW\4X\tX\4Y\tY\4Z\t") + buf.write("Z\4[\t[\4\\\t\\\4]\t]\4^\t^\4_\t_\4`\t`\4a\ta\4b\tb\4") + buf.write("c\tc\4d\td\4e\te\4f\tf\4g\tg\4h\th\4i\ti\4j\tj\4k\tk\4") + buf.write("l\tl\4m\tm\4n\tn\4o\to\4p\tp\4q\tq\4r\tr\4s\ts\4t\tt\4") + buf.write("u\tu\4v\tv\4w\tw\4x\tx\4y\ty\4z\tz\4{\t{\4|\t|\4}\t}\4") + buf.write("~\t~\4\177\t\177\4\u0080\t\u0080\4\u0081\t\u0081\4\u0082") + buf.write("\t\u0082\4\u0083\t\u0083\4\u0084\t\u0084\4\u0085\t\u0085") + buf.write("\4\u0086\t\u0086\4\u0087\t\u0087\4\u0088\t\u0088\4\u0089") + buf.write("\t\u0089\4\u008a\t\u008a\4\u008b\t\u008b\4\u008c\t\u008c") + buf.write("\4\u008d\t\u008d\4\u008e\t\u008e\4\u008f\t\u008f\4\u0090") + buf.write("\t\u0090\4\u0091\t\u0091\4\u0092\t\u0092\4\u0093\t\u0093") + 
buf.write("\4\u0094\t\u0094\4\u0095\t\u0095\4\u0096\t\u0096\4\u0097") + buf.write("\t\u0097\4\u0098\t\u0098\4\u0099\t\u0099\3\2\3\2\3\3\3") + buf.write("\3\3\3\3\3\3\4\3\4\3\4\3\4\3\5\3\5\3\6\3\6\3\7\3\7\3\b") + buf.write("\3\b\3\b\3\t\3\t\3\t\3\t\3\n\3\n\3\n\3\n\3\n\3\n\3\n\3") + buf.write("\n\3\n\3\n\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13\3\13") + buf.write("\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\f\3\r\3\r\3") + buf.write("\r\3\r\3\r\3\r\3\r\3\16\3\16\3\16\3\16\3\16\3\16\3\16") + buf.write("\3\16\3\16\3\17\3\17\3\17\3\17\3\17\3\17\3\20\3\20\3\20") + buf.write("\3\20\3\20\3\20\3\20\3\21\3\21\3\21\3\21\3\21\3\21\3\21") + buf.write("\3\21\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22\3\22") + buf.write("\3\23\3\23\3\23\3\23\3\23\3\23\3\23\3\24\3\24\3\24\3\24") + buf.write("\3\24\3\24\3\24\3\24\3\25\3\25\3\25\3\25\3\25\3\25\3\25") + buf.write("\3\25\3\26\3\26\3\26\3\26\3\26\3\26\3\26\3\27\3\27\3\27") + buf.write("\3\27\3\27\3\27\3\27\3\30\3\30\3\30\3\30\3\30\3\30\3\31") + buf.write("\3\31\3\31\3\31\3\31\3\31\3\31\3\31\3\32\3\32\3\32\3\32") + buf.write("\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37\3\37") + buf.write("\3 \3 \3!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3\'\3\'\3") + buf.write("(\3(\3)\3)\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3/\3/\3\60\3") + buf.write("\60\3\61\3\61\3\62\3\62\3\63\6\63\u0207\n\63\r\63\16\63") + buf.write("\u0208\3\63\3\63\3\64\3\64\3\64\3\64\3\65\3\65\5\65\u0213") + buf.write("\n\65\3\66\3\66\3\67\3\67\38\38\38\38\78\u021d\n8\f8\16") + buf.write("8\u0220\138\38\38\38\58\u0225\n8\39\39\39\39\39\79\u022c") + buf.write("\n9\f9\169\u022f\139\39\39\39\59\u0234\n9\3:\3:\3:\3:") + buf.write("\7:\u023a\n:\f:\16:\u023d\13:\3;\3;\3;\3;\3;\5;\u0244") + buf.write("\n;\3<\3<\3<\3=\3=\3=\3=\3=\5=\u024e\n=\5=\u0250\n=\5") + buf.write("=\u0252\n=\5=\u0254\n=\3>\3>\3>\7>\u0259\n>\f>\16>\u025c") + buf.write("\13>\5>\u025e\n>\3?\3?\3@\3@\3A\3A\3A\3A\3A\3A\3A\3A\3") + buf.write("A\5A\u026d\nA\3B\3B\3B\5B\u0272\nB\3B\3B\3C\3C\3C\7C\u0279") + 
buf.write("\nC\fC\16C\u027c\13C\3C\3C\3D\3D\3D\7D\u0283\nD\fD\16") + buf.write("D\u0286\13D\3D\3D\3E\3E\3E\7E\u028d\nE\fE\16E\u0290\13") + buf.write("E\3F\3F\3F\3F\5F\u0296\nF\3G\3G\3H\3H\3H\3H\3I\3I\3J\3") + buf.write("J\3K\3K\3K\3L\3L\3M\3M\3N\3N\3O\3O\3P\3P\3Q\3Q\3R\3R\3") + buf.write("S\3S\3T\3T\3T\3U\3U\3V\3V\3W\3W\3X\3X\3Y\3Y\3Z\3Z\3[\3") + buf.write("[\3[\3\\\3\\\3]\3]\3^\3^\3_\3_\3`\3`\3a\3a\3b\3b\3b\3") + buf.write("c\3c\3d\3d\3e\3e\3f\3f\3f\3f\3f\3g\3g\3g\3g\3h\3h\3h\3") + buf.write("h\3i\3i\3i\3i\3j\3j\3j\3k\3k\3k\3k\3l\3l\3m\3m\3m\3m\3") + buf.write("m\3n\3n\3n\3n\3o\3o\3o\3o\3p\3p\3p\3p\3q\3q\3q\3q\3r\3") + buf.write("r\3r\3r\3s\3s\3s\3s\3t\3t\3t\3u\3u\3u\3u\3v\3v\3w\3w\3") + buf.write("w\3w\3w\3x\3x\3x\3x\3x\3y\3y\3y\3y\3y\3z\3z\3z\3z\3{\3") + buf.write("{\3{\3{\3{\3|\3|\3|\3|\3}\3}\3}\3}\3~\3~\3~\3~\3\177\3") + buf.write("\177\3\177\3\177\3\u0080\3\u0080\3\u0080\3\u0080\3\u0081") + buf.write("\3\u0081\3\u0081\3\u0081\3\u0082\3\u0082\3\u0082\3\u0082") + buf.write("\3\u0083\6\u0083\u0351\n\u0083\r\u0083\16\u0083\u0352") + buf.write("\3\u0083\3\u0083\3\u0083\3\u0084\3\u0084\3\u0084\3\u0084") + buf.write("\3\u0084\3\u0085\3\u0085\3\u0085\3\u0085\3\u0085\3\u0086") + buf.write("\3\u0086\3\u0086\3\u0086\3\u0086\3\u0087\3\u0087\3\u0087") + buf.write("\3\u0087\3\u0088\3\u0088\3\u0088\3\u0088\3\u0088\3\u0089") + buf.write("\3\u0089\3\u0089\3\u0089\3\u008a\3\u008a\3\u008a\3\u008a") + buf.write("\3\u008b\3\u008b\3\u008b\3\u008b\3\u008c\6\u008c\u037d") + buf.write("\n\u008c\r\u008c\16\u008c\u037e\3\u008c\3\u008c\3\u008c") + buf.write("\3\u008d\3\u008d\3\u008d\3\u008d\3\u008d\3\u008e\3\u008e") + buf.write("\3\u008e\3\u008e\3\u008e\3\u008f\3\u008f\3\u008f\3\u008f") + buf.write("\3\u008f\3\u0090\3\u0090\3\u0090\3\u0090\3\u0091\3\u0091") + buf.write("\3\u0091\3\u0091\3\u0091\3\u0092\3\u0092\3\u0092\3\u0092") + buf.write("\3\u0093\3\u0093\3\u0093\3\u0093\3\u0094\3\u0094\3\u0094") + buf.write("\3\u0094\3\u0095\6\u0095\u03a9\n\u0095\r\u0095\16\u0095") + 
buf.write("\u03aa\3\u0095\3\u0095\3\u0095\3\u0096\3\u0096\6\u0096") + buf.write("\u03b2\n\u0096\r\u0096\16\u0096\u03b3\3\u0096\3\u0096") + buf.write("\3\u0097\3\u0097\3\u0097\3\u0097\3\u0098\3\u0098\3\u0098") + buf.write("\3\u0098\3\u0099\3\u0099\7\u0099\u03c2\n\u0099\f\u0099") + buf.write("\16\u0099\u03c5\13\u0099\4\u021e\u022d\2\u009a\t\6\13") + buf.write("\7\r\b\17\t\21\n\23\13\25\f\27\r\31\16\33\17\35\20\37") + buf.write("\21!\22#\23%\24\'\25)\26+\27-\30/\31\61\32\63\33\65\34") + buf.write("\67\359\36;\37= ?!A\"C#E$G%I&K\'M(O)Q*S+U,W-Y.[/]\60_") + buf.write("\61a\62c\63e\64g\65i\66k\67m8o\2q\2s\2u\2w\2y\2{\2}\2") + buf.write("\177\2\u0081\2\u0083\2\u0085\2\u0087\2\u0089\2\u008b\2") + buf.write("\u008d\2\u008f\2\u0091\2\u0093\2\u0095\2\u0097\2\u0099") + buf.write("\2\u009b\2\u009d\2\u009f\2\u00a1\2\u00a3\2\u00a5\2\u00a7") + buf.write("\2\u00a9\2\u00ab\2\u00ad\2\u00af\2\u00b1\2\u00b3\2\u00b5") + buf.write("\2\u00b7\2\u00b9\2\u00bb\2\u00bd\2\u00bf\2\u00c1\2\u00c3") + buf.write("\2\u00c5\2\u00c7\2\u00c9\2\u00cb\2\u00cd\2\u00cf\2\u00d1") + buf.write("\2\u00d3\2\u00d5\2\u00d7\2\u00d99\u00db:\u00dd;\u00df") + buf.write("\2\u00e1\2\u00e3\2\u00e5\2\u00e7\2\u00e9\2\u00eb\2\u00ed") + buf.write("<\u00ef=\u00f1>\u00f3\2\u00f5\2\u00f7\2\u00f9\2\u00fb") + buf.write("\2\u00fd\2\u00ff\2\u0101\2\u0103\2\u0105\2\u0107\2\u0109") + buf.write("\2\u010b\2\u010d\2\u010f\2\u0111\2\u0113\2\u0115\2\u0117") + buf.write("\2\u0119\2\u011b\2\u011d\2\u011f\2\u0121\2\u0123\2\u0125") + buf.write("\2\u0127\2\u0129\2\u012b\2\u012d\2\u012f\2\u0131\2\u0133") + buf.write("\5\u0135?\u0137\2\t\2\3\4\5\6\7\b\16\4\2\13\13\"\"\4\2") + buf.write("\f\f\16\17\4\2\f\f\17\17\n\2$$))^^ddhhppttvv\3\2\63;\5") + buf.write("\2\62;CHch\3\2\62;\6\2\f\f\17\17))^^\6\2\f\f\17\17$$^") + buf.write("^\5\2\u00b9\u00b9\u0302\u0371\u2041\u2042\17\2C\\c|\u00c2") + buf.write("\u00d8\u00da\u00f8\u00fa\u0301\u0372\u037f\u0381\u2001") + buf.write("\u200e\u200f\u2072\u2191\u2c02\u2ff1\u3003\ud801\uf902") + 
buf.write("\ufdd1\ufdf2\uffff\3\2^_\u03ae\2\t\3\2\2\2\2\13\3\2\2") + buf.write("\2\2\r\3\2\2\2\2\17\3\2\2\2\2\21\3\2\2\2\2\23\3\2\2\2") + buf.write("\2\25\3\2\2\2\2\27\3\2\2\2\2\31\3\2\2\2\2\33\3\2\2\2\2") + buf.write("\35\3\2\2\2\2\37\3\2\2\2\2!\3\2\2\2\2#\3\2\2\2\2%\3\2") + buf.write("\2\2\2\'\3\2\2\2\2)\3\2\2\2\2+\3\2\2\2\2-\3\2\2\2\2/\3") + buf.write("\2\2\2\2\61\3\2\2\2\2\63\3\2\2\2\2\65\3\2\2\2\2\67\3\2") + buf.write("\2\2\29\3\2\2\2\2;\3\2\2\2\2=\3\2\2\2\2?\3\2\2\2\2A\3") + buf.write("\2\2\2\2C\3\2\2\2\2E\3\2\2\2\2G\3\2\2\2\2I\3\2\2\2\2K") + buf.write("\3\2\2\2\2M\3\2\2\2\2O\3\2\2\2\2Q\3\2\2\2\2S\3\2\2\2\2") + buf.write("U\3\2\2\2\2W\3\2\2\2\2Y\3\2\2\2\2[\3\2\2\2\2]\3\2\2\2") + buf.write("\2_\3\2\2\2\2a\3\2\2\2\2c\3\2\2\2\2e\3\2\2\2\2g\3\2\2") + buf.write("\2\2i\3\2\2\2\2k\3\2\2\2\2m\3\2\2\2\3\u00d1\3\2\2\2\3") + buf.write("\u00d3\3\2\2\2\3\u00d5\3\2\2\2\3\u00d7\3\2\2\2\3\u00d9") + buf.write("\3\2\2\2\3\u00db\3\2\2\2\3\u00dd\3\2\2\2\4\u00df\3\2\2") + buf.write("\2\4\u00e1\3\2\2\2\4\u00e3\3\2\2\2\4\u00e5\3\2\2\2\4\u00e7") + buf.write("\3\2\2\2\4\u00e9\3\2\2\2\4\u00eb\3\2\2\2\4\u00ed\3\2\2") + buf.write("\2\4\u00ef\3\2\2\2\4\u00f1\3\2\2\2\5\u00f3\3\2\2\2\5\u00f5") + buf.write("\3\2\2\2\5\u00f7\3\2\2\2\5\u00f9\3\2\2\2\5\u00fb\3\2\2") + buf.write("\2\5\u00fd\3\2\2\2\5\u00ff\3\2\2\2\5\u0101\3\2\2\2\5\u0103") + buf.write("\3\2\2\2\5\u0105\3\2\2\2\5\u0107\3\2\2\2\5\u0109\3\2\2") + buf.write("\2\5\u010b\3\2\2\2\6\u010d\3\2\2\2\6\u010f\3\2\2\2\6\u0111") + buf.write("\3\2\2\2\6\u0113\3\2\2\2\6\u0115\3\2\2\2\6\u0117\3\2\2") + buf.write("\2\6\u0119\3\2\2\2\6\u011b\3\2\2\2\6\u011d\3\2\2\2\7\u011f") + buf.write("\3\2\2\2\7\u0121\3\2\2\2\7\u0123\3\2\2\2\7\u0125\3\2\2") + buf.write("\2\7\u0127\3\2\2\2\7\u0129\3\2\2\2\7\u012b\3\2\2\2\7\u012d") + buf.write("\3\2\2\2\7\u012f\3\2\2\2\b\u0131\3\2\2\2\b\u0133\3\2\2") + buf.write("\2\b\u0135\3\2\2\2\t\u0139\3\2\2\2\13\u013b\3\2\2\2\r") + buf.write("\u013f\3\2\2\2\17\u0143\3\2\2\2\21\u0145\3\2\2\2\23\u0147") + 
buf.write("\3\2\2\2\25\u0149\3\2\2\2\27\u014c\3\2\2\2\31\u0150\3") + buf.write("\2\2\2\33\u015a\3\2\2\2\35\u0163\3\2\2\2\37\u016e\3\2") + buf.write("\2\2!\u0175\3\2\2\2#\u017e\3\2\2\2%\u0184\3\2\2\2\'\u018b") + buf.write("\3\2\2\2)\u0193\3\2\2\2+\u019d\3\2\2\2-\u01a4\3\2\2\2") + buf.write("/\u01ac\3\2\2\2\61\u01b4\3\2\2\2\63\u01bb\3\2\2\2\65\u01c2") + buf.write("\3\2\2\2\67\u01c8\3\2\2\29\u01d0\3\2\2\2;\u01d5\3\2\2") + buf.write("\2=\u01d7\3\2\2\2?\u01d9\3\2\2\2A\u01db\3\2\2\2C\u01dd") + buf.write("\3\2\2\2E\u01df\3\2\2\2G\u01e1\3\2\2\2I\u01e3\3\2\2\2") + buf.write("K\u01e5\3\2\2\2M\u01e7\3\2\2\2O\u01e9\3\2\2\2Q\u01eb\3") + buf.write("\2\2\2S\u01ed\3\2\2\2U\u01ef\3\2\2\2W\u01f1\3\2\2\2Y\u01f3") + buf.write("\3\2\2\2[\u01f5\3\2\2\2]\u01f7\3\2\2\2_\u01f9\3\2\2\2") + buf.write("a\u01fb\3\2\2\2c\u01fd\3\2\2\2e\u01ff\3\2\2\2g\u0201\3") + buf.write("\2\2\2i\u0203\3\2\2\2k\u0206\3\2\2\2m\u020c\3\2\2\2o\u0212") + buf.write("\3\2\2\2q\u0214\3\2\2\2s\u0216\3\2\2\2u\u0218\3\2\2\2") + buf.write("w\u0226\3\2\2\2y\u0235\3\2\2\2{\u023e\3\2\2\2}\u0245\3") + buf.write("\2\2\2\177\u0248\3\2\2\2\u0081\u025d\3\2\2\2\u0083\u025f") + buf.write("\3\2\2\2\u0085\u0261\3\2\2\2\u0087\u026c\3\2\2\2\u0089") + buf.write("\u026e\3\2\2\2\u008b\u0275\3\2\2\2\u008d\u027f\3\2\2\2") + buf.write("\u008f\u0289\3\2\2\2\u0091\u0295\3\2\2\2\u0093\u0297\3") + buf.write("\2\2\2\u0095\u0299\3\2\2\2\u0097\u029d\3\2\2\2\u0099\u029f") + buf.write("\3\2\2\2\u009b\u02a1\3\2\2\2\u009d\u02a4\3\2\2\2\u009f") + buf.write("\u02a6\3\2\2\2\u00a1\u02a8\3\2\2\2\u00a3\u02aa\3\2\2\2") + buf.write("\u00a5\u02ac\3\2\2\2\u00a7\u02ae\3\2\2\2\u00a9\u02b0\3") + buf.write("\2\2\2\u00ab\u02b2\3\2\2\2\u00ad\u02b4\3\2\2\2\u00af\u02b7") + buf.write("\3\2\2\2\u00b1\u02b9\3\2\2\2\u00b3\u02bb\3\2\2\2\u00b5") + buf.write("\u02bd\3\2\2\2\u00b7\u02bf\3\2\2\2\u00b9\u02c1\3\2\2\2") + buf.write("\u00bb\u02c3\3\2\2\2\u00bd\u02c6\3\2\2\2\u00bf\u02c8\3") + buf.write("\2\2\2\u00c1\u02ca\3\2\2\2\u00c3\u02cc\3\2\2\2\u00c5\u02ce") + 
buf.write("\3\2\2\2\u00c7\u02d0\3\2\2\2\u00c9\u02d2\3\2\2\2\u00cb") + buf.write("\u02d5\3\2\2\2\u00cd\u02d7\3\2\2\2\u00cf\u02d9\3\2\2\2") + buf.write("\u00d1\u02db\3\2\2\2\u00d3\u02e0\3\2\2\2\u00d5\u02e4\3") + buf.write("\2\2\2\u00d7\u02e8\3\2\2\2\u00d9\u02ec\3\2\2\2\u00db\u02ef") + buf.write("\3\2\2\2\u00dd\u02f3\3\2\2\2\u00df\u02f5\3\2\2\2\u00e1") + buf.write("\u02fa\3\2\2\2\u00e3\u02fe\3\2\2\2\u00e5\u0302\3\2\2\2") + buf.write("\u00e7\u0306\3\2\2\2\u00e9\u030a\3\2\2\2\u00eb\u030e\3") + buf.write("\2\2\2\u00ed\u0312\3\2\2\2\u00ef\u0315\3\2\2\2\u00f1\u0319") + buf.write("\3\2\2\2\u00f3\u031b\3\2\2\2\u00f5\u0320\3\2\2\2\u00f7") + buf.write("\u0325\3\2\2\2\u00f9\u032a\3\2\2\2\u00fb\u032e\3\2\2\2") + buf.write("\u00fd\u0333\3\2\2\2\u00ff\u0337\3\2\2\2\u0101\u033b\3") + buf.write("\2\2\2\u0103\u033f\3\2\2\2\u0105\u0343\3\2\2\2\u0107\u0347") + buf.write("\3\2\2\2\u0109\u034b\3\2\2\2\u010b\u0350\3\2\2\2\u010d") + buf.write("\u0357\3\2\2\2\u010f\u035c\3\2\2\2\u0111\u0361\3\2\2\2") + buf.write("\u0113\u0366\3\2\2\2\u0115\u036a\3\2\2\2\u0117\u036f\3") + buf.write("\2\2\2\u0119\u0373\3\2\2\2\u011b\u0377\3\2\2\2\u011d\u037c") + buf.write("\3\2\2\2\u011f\u0383\3\2\2\2\u0121\u0388\3\2\2\2\u0123") + buf.write("\u038d\3\2\2\2\u0125\u0392\3\2\2\2\u0127\u0396\3\2\2\2") + buf.write("\u0129\u039b\3\2\2\2\u012b\u039f\3\2\2\2\u012d\u03a3\3") + buf.write("\2\2\2\u012f\u03a8\3\2\2\2\u0131\u03b1\3\2\2\2\u0133\u03b7") + buf.write("\3\2\2\2\u0135\u03bb\3\2\2\2\u0137\u03bf\3\2\2\2\u0139") + buf.write("\u013a\5w9\2\u013a\n\3\2\2\2\u013b\u013c\5u8\2\u013c\u013d") + buf.write("\3\2\2\2\u013d\u013e\b\3\2\2\u013e\f\3\2\2\2\u013f\u0140") + buf.write("\5y:\2\u0140\u0141\3\2\2\2\u0141\u0142\b\4\2\2\u0142\16") + buf.write("\3\2\2\2\u0143\u0144\5\u0081>\2\u0144\20\3\2\2\2\u0145") + buf.write("\u0146\5\u008bC\2\u0146\22\3\2\2\2\u0147\u0148\5\u008f") + buf.write("E\2\u0148\24\3\2\2\2\u0149\u014a\5\u00a9R\2\u014a\u014b") + buf.write("\b\b\3\2\u014b\26\3\2\2\2\u014c\u014d\5\u00a5P\2\u014d") + 
buf.write("\u014e\3\2\2\2\u014e\u014f\b\t\4\2\u014f\30\3\2\2\2\u0150") + buf.write("\u0151\7q\2\2\u0151\u0152\7r\2\2\u0152\u0153\7v\2\2\u0153") + buf.write("\u0154\7k\2\2\u0154\u0155\7q\2\2\u0155\u0156\7p\2\2\u0156") + buf.write("\u0157\7u\2\2\u0157\u0158\3\2\2\2\u0158\u0159\b\n\5\2") + buf.write("\u0159\32\3\2\2\2\u015a\u015b\7v\2\2\u015b\u015c\7q\2") + buf.write("\2\u015c\u015d\7m\2\2\u015d\u015e\7g\2\2\u015e\u015f\7") + buf.write("p\2\2\u015f\u0160\7u\2\2\u0160\u0161\3\2\2\2\u0161\u0162") + buf.write("\b\13\6\2\u0162\34\3\2\2\2\u0163\u0164\7e\2\2\u0164\u0165") + buf.write("\7j\2\2\u0165\u0166\7c\2\2\u0166\u0167\7p\2\2\u0167\u0168") + buf.write("\7p\2\2\u0168\u0169\7g\2\2\u0169\u016a\7n\2\2\u016a\u016b") + buf.write("\7u\2\2\u016b\u016c\3\2\2\2\u016c\u016d\b\f\7\2\u016d") + buf.write("\36\3\2\2\2\u016e\u016f\7k\2\2\u016f\u0170\7o\2\2\u0170") + buf.write("\u0171\7r\2\2\u0171\u0172\7q\2\2\u0172\u0173\7t\2\2\u0173") + buf.write("\u0174\7v\2\2\u0174 \3\2\2\2\u0175\u0176\7h\2\2\u0176") + buf.write("\u0177\7t\2\2\u0177\u0178\7c\2\2\u0178\u0179\7i\2\2\u0179") + buf.write("\u017a\7o\2\2\u017a\u017b\7g\2\2\u017b\u017c\7p\2\2\u017c") + buf.write("\u017d\7v\2\2\u017d\"\3\2\2\2\u017e\u017f\7n\2\2\u017f") + buf.write("\u0180\7g\2\2\u0180\u0181\7z\2\2\u0181\u0182\7g\2\2\u0182") + buf.write("\u0183\7t\2\2\u0183$\3\2\2\2\u0184\u0185\7r\2\2\u0185") + buf.write("\u0186\7c\2\2\u0186\u0187\7t\2\2\u0187\u0188\7u\2\2\u0188") + buf.write("\u0189\7g\2\2\u0189\u018a\7t\2\2\u018a&\3\2\2\2\u018b") + buf.write("\u018c\7i\2\2\u018c\u018d\7t\2\2\u018d\u018e\7c\2\2\u018e") + buf.write("\u018f\7o\2\2\u018f\u0190\7o\2\2\u0190\u0191\7c\2\2\u0191") + buf.write("\u0192\7t\2\2\u0192(\3\2\2\2\u0193\u0194\7r\2\2\u0194") + buf.write("\u0195\7t\2\2\u0195\u0196\7q\2\2\u0196\u0197\7v\2\2\u0197") + buf.write("\u0198\7g\2\2\u0198\u0199\7e\2\2\u0199\u019a\7v\2\2\u019a") + buf.write("\u019b\7g\2\2\u019b\u019c\7f\2\2\u019c*\3\2\2\2\u019d") + 
buf.write("\u019e\7r\2\2\u019e\u019f\7w\2\2\u019f\u01a0\7d\2\2\u01a0") + buf.write("\u01a1\7n\2\2\u01a1\u01a2\7k\2\2\u01a2\u01a3\7e\2\2\u01a3") + buf.write(",\3\2\2\2\u01a4\u01a5\7r\2\2\u01a5\u01a6\7t\2\2\u01a6") + buf.write("\u01a7\7k\2\2\u01a7\u01a8\7x\2\2\u01a8\u01a9\7c\2\2\u01a9") + buf.write("\u01aa\7v\2\2\u01aa\u01ab\7g\2\2\u01ab.\3\2\2\2\u01ac") + buf.write("\u01ad\7t\2\2\u01ad\u01ae\7g\2\2\u01ae\u01af\7v\2\2\u01af") + buf.write("\u01b0\7w\2\2\u01b0\u01b1\7t\2\2\u01b1\u01b2\7p\2\2\u01b2") + buf.write("\u01b3\7u\2\2\u01b3\60\3\2\2\2\u01b4\u01b5\7n\2\2\u01b5") + buf.write("\u01b6\7q\2\2\u01b6\u01b7\7e\2\2\u01b7\u01b8\7c\2\2\u01b8") + buf.write("\u01b9\7n\2\2\u01b9\u01ba\7u\2\2\u01ba\62\3\2\2\2\u01bb") + buf.write("\u01bc\7v\2\2\u01bc\u01bd\7j\2\2\u01bd\u01be\7t\2\2\u01be") + buf.write("\u01bf\7q\2\2\u01bf\u01c0\7y\2\2\u01c0\u01c1\7u\2\2\u01c1") + buf.write("\64\3\2\2\2\u01c2\u01c3\7e\2\2\u01c3\u01c4\7c\2\2\u01c4") + buf.write("\u01c5\7v\2\2\u01c5\u01c6\7e\2\2\u01c6\u01c7\7j\2\2\u01c7") + buf.write("\66\3\2\2\2\u01c8\u01c9\7h\2\2\u01c9\u01ca\7k\2\2\u01ca") + buf.write("\u01cb\7p\2\2\u01cb\u01cc\7c\2\2\u01cc\u01cd\7n\2\2\u01cd") + buf.write("\u01ce\7n\2\2\u01ce\u01cf\7{\2\2\u01cf8\3\2\2\2\u01d0") + buf.write("\u01d1\7o\2\2\u01d1\u01d2\7q\2\2\u01d2\u01d3\7f\2\2\u01d3") + buf.write("\u01d4\7g\2\2\u01d4:\3\2\2\2\u01d5\u01d6\5\u0099J\2\u01d6") + buf.write("<\3\2\2\2\u01d7\u01d8\5\u009bK\2\u01d8>\3\2\2\2\u01d9") + buf.write("\u01da\5\u00c3_\2\u01da@\3\2\2\2\u01db\u01dc\5\u00c5`") + buf.write("\2\u01dcB\3\2\2\2\u01dd\u01de\5\u00a1N\2\u01deD\3\2\2") + buf.write("\2\u01df\u01e0\5\u00a3O\2\u01e0F\3\2\2\2\u01e1\u01e2\5") + buf.write("\u00a5P\2\u01e2H\3\2\2\2\u01e3\u01e4\5\u00a7Q\2\u01e4") + buf.write("J\3\2\2\2\u01e5\u01e6\5\u00adT\2\u01e6L\3\2\2\2\u01e7") + buf.write("\u01e8\5\u00afU\2\u01e8N\3\2\2\2\u01e9\u01ea\5\u00b1V") + buf.write("\2\u01eaP\3\2\2\2\u01eb\u01ec\5\u00b3W\2\u01ecR\3\2\2") + buf.write("\2\u01ed\u01ee\5\u00b5X\2\u01eeT\3\2\2\2\u01ef\u01f0\5") + 
buf.write("\u00b7Y\2\u01f0V\3\2\2\2\u01f1\u01f2\5\u00bb[\2\u01f2") + buf.write("X\3\2\2\2\u01f3\u01f4\5\u00b9Z\2\u01f4Z\3\2\2\2\u01f5") + buf.write("\u01f6\5\u00bf]\2\u01f6\\\3\2\2\2\u01f7\u01f8\5\u00c1") + buf.write("^\2\u01f8^\3\2\2\2\u01f9\u01fa\5\u00c9b\2\u01fa`\3\2\2") + buf.write("\2\u01fb\u01fc\5\u00c7a\2\u01fcb\3\2\2\2\u01fd\u01fe\5") + buf.write("\u00cbc\2\u01fed\3\2\2\2\u01ff\u0200\5\u00cdd\2\u0200") + buf.write("f\3\2\2\2\u0201\u0202\5\u00cfe\2\u0202h\3\2\2\2\u0203") + buf.write("\u0204\5\u0137\u0099\2\u0204j\3\2\2\2\u0205\u0207\5o\65") + buf.write("\2\u0206\u0205\3\2\2\2\u0207\u0208\3\2\2\2\u0208\u0206") + buf.write("\3\2\2\2\u0208\u0209\3\2\2\2\u0209\u020a\3\2\2\2\u020a") + buf.write("\u020b\b\63\2\2\u020bl\3\2\2\2\u020c\u020d\13\2\2\2\u020d") + buf.write("\u020e\3\2\2\2\u020e\u020f\b\64\b\2\u020fn\3\2\2\2\u0210") + buf.write("\u0213\5q\66\2\u0211\u0213\5s\67\2\u0212\u0210\3\2\2\2") + buf.write("\u0212\u0211\3\2\2\2\u0213p\3\2\2\2\u0214\u0215\t\2\2") + buf.write("\2\u0215r\3\2\2\2\u0216\u0217\t\3\2\2\u0217t\3\2\2\2\u0218") + buf.write("\u0219\7\61\2\2\u0219\u021a\7,\2\2\u021a\u021e\3\2\2\2") + buf.write("\u021b\u021d\13\2\2\2\u021c\u021b\3\2\2\2\u021d\u0220") + buf.write("\3\2\2\2\u021e\u021f\3\2\2\2\u021e\u021c\3\2\2\2\u021f") + buf.write("\u0224\3\2\2\2\u0220\u021e\3\2\2\2\u0221\u0222\7,\2\2") + buf.write("\u0222\u0225\7\61\2\2\u0223\u0225\7\2\2\3\u0224\u0221") + buf.write("\3\2\2\2\u0224\u0223\3\2\2\2\u0225v\3\2\2\2\u0226\u0227") + buf.write("\7\61\2\2\u0227\u0228\7,\2\2\u0228\u0229\7,\2\2\u0229") + buf.write("\u022d\3\2\2\2\u022a\u022c\13\2\2\2\u022b\u022a\3\2\2") + buf.write("\2\u022c\u022f\3\2\2\2\u022d\u022e\3\2\2\2\u022d\u022b") + buf.write("\3\2\2\2\u022e\u0233\3\2\2\2\u022f\u022d\3\2\2\2\u0230") + buf.write("\u0231\7,\2\2\u0231\u0234\7\61\2\2\u0232\u0234\7\2\2\3") + buf.write("\u0233\u0230\3\2\2\2\u0233\u0232\3\2\2\2\u0234x\3\2\2") + buf.write("\2\u0235\u0236\7\61\2\2\u0236\u0237\7\61\2\2\u0237\u023b") + 
buf.write("\3\2\2\2\u0238\u023a\n\4\2\2\u0239\u0238\3\2\2\2\u023a") + buf.write("\u023d\3\2\2\2\u023b\u0239\3\2\2\2\u023b\u023c\3\2\2\2") + buf.write("\u023cz\3\2\2\2\u023d\u023b\3\2\2\2\u023e\u0243\5\u0097") + buf.write("I\2\u023f\u0244\t\5\2\2\u0240\u0244\5\177=\2\u0241\u0244") + buf.write("\13\2\2\2\u0242\u0244\7\2\2\3\u0243\u023f\3\2\2\2\u0243") + buf.write("\u0240\3\2\2\2\u0243\u0241\3\2\2\2\u0243\u0242\3\2\2\2") + buf.write("\u0244|\3\2\2\2\u0245\u0246\5\u0097I\2\u0246\u0247\13") + buf.write("\2\2\2\u0247~\3\2\2\2\u0248\u0253\7w\2\2\u0249\u0251\5") + buf.write("\u0083?\2\u024a\u024f\5\u0083?\2\u024b\u024d\5\u0083?") + buf.write("\2\u024c\u024e\5\u0083?\2\u024d\u024c\3\2\2\2\u024d\u024e") + buf.write("\3\2\2\2\u024e\u0250\3\2\2\2\u024f\u024b\3\2\2\2\u024f") + buf.write("\u0250\3\2\2\2\u0250\u0252\3\2\2\2\u0251\u024a\3\2\2\2") + buf.write("\u0251\u0252\3\2\2\2\u0252\u0254\3\2\2\2\u0253\u0249\3") + buf.write("\2\2\2\u0253\u0254\3\2\2\2\u0254\u0080\3\2\2\2\u0255\u025e") + buf.write("\7\62\2\2\u0256\u025a\t\6\2\2\u0257\u0259\5\u0085@\2\u0258") + buf.write("\u0257\3\2\2\2\u0259\u025c\3\2\2\2\u025a\u0258\3\2\2\2") + buf.write("\u025a\u025b\3\2\2\2\u025b\u025e\3\2\2\2\u025c\u025a\3") + buf.write("\2\2\2\u025d\u0255\3\2\2\2\u025d\u0256\3\2\2\2\u025e\u0082") + buf.write("\3\2\2\2\u025f\u0260\t\7\2\2\u0260\u0084\3\2\2\2\u0261") + buf.write("\u0262\t\b\2\2\u0262\u0086\3\2\2\2\u0263\u0264\7v\2\2") + buf.write("\u0264\u0265\7t\2\2\u0265\u0266\7w\2\2\u0266\u026d\7g") + buf.write("\2\2\u0267\u0268\7h\2\2\u0268\u0269\7c\2\2\u0269\u026a") + buf.write("\7n\2\2\u026a\u026b\7u\2\2\u026b\u026d\7g\2\2\u026c\u0263") + buf.write("\3\2\2\2\u026c\u0267\3\2\2\2\u026d\u0088\3\2\2\2\u026e") + buf.write("\u0271\5\u009dL\2\u026f\u0272\5{;\2\u0270\u0272\n\t\2") + buf.write("\2\u0271\u026f\3\2\2\2\u0271\u0270\3\2\2\2\u0272\u0273") + buf.write("\3\2\2\2\u0273\u0274\5\u009dL\2\u0274\u008a\3\2\2\2\u0275") + buf.write("\u027a\5\u009dL\2\u0276\u0279\5{;\2\u0277\u0279\n\t\2") + 
buf.write("\2\u0278\u0276\3\2\2\2\u0278\u0277\3\2\2\2\u0279\u027c") + buf.write("\3\2\2\2\u027a\u0278\3\2\2\2\u027a\u027b\3\2\2\2\u027b") + buf.write("\u027d\3\2\2\2\u027c\u027a\3\2\2\2\u027d\u027e\5\u009d") + buf.write("L\2\u027e\u008c\3\2\2\2\u027f\u0284\5\u009fM\2\u0280\u0283") + buf.write("\5{;\2\u0281\u0283\n\n\2\2\u0282\u0280\3\2\2\2\u0282\u0281") + buf.write("\3\2\2\2\u0283\u0286\3\2\2\2\u0284\u0282\3\2\2\2\u0284") + buf.write("\u0285\3\2\2\2\u0285\u0287\3\2\2\2\u0286\u0284\3\2\2\2") + buf.write("\u0287\u0288\5\u009fM\2\u0288\u008e\3\2\2\2\u0289\u028e") + buf.write("\5\u009dL\2\u028a\u028d\5{;\2\u028b\u028d\n\t\2\2\u028c") + buf.write("\u028a\3\2\2\2\u028c\u028b\3\2\2\2\u028d\u0290\3\2\2\2") + buf.write("\u028e\u028c\3\2\2\2\u028e\u028f\3\2\2\2\u028f\u0090\3") + buf.write("\2\2\2\u0290\u028e\3\2\2\2\u0291\u0296\5\u0093G\2\u0292") + buf.write("\u0296\4\62;\2\u0293\u0296\5\u00bd\\\2\u0294\u0296\t\13") + buf.write("\2\2\u0295\u0291\3\2\2\2\u0295\u0292\3\2\2\2\u0295\u0293") + buf.write("\3\2\2\2\u0295\u0294\3\2\2\2\u0296\u0092\3\2\2\2\u0297") + buf.write("\u0298\t\f\2\2\u0298\u0094\3\2\2\2\u0299\u029a\7k\2\2") + buf.write("\u029a\u029b\7p\2\2\u029b\u029c\7v\2\2\u029c\u0096\3\2") + buf.write("\2\2\u029d\u029e\7^\2\2\u029e\u0098\3\2\2\2\u029f\u02a0") + buf.write("\7<\2\2\u02a0\u009a\3\2\2\2\u02a1\u02a2\7<\2\2\u02a2\u02a3") + buf.write("\7<\2\2\u02a3\u009c\3\2\2\2\u02a4\u02a5\7)\2\2\u02a5\u009e") + buf.write("\3\2\2\2\u02a6\u02a7\7$\2\2\u02a7\u00a0\3\2\2\2\u02a8") + buf.write("\u02a9\7*\2\2\u02a9\u00a2\3\2\2\2\u02aa\u02ab\7+\2\2\u02ab") + buf.write("\u00a4\3\2\2\2\u02ac\u02ad\7}\2\2\u02ad\u00a6\3\2\2\2") + buf.write("\u02ae\u02af\7\177\2\2\u02af\u00a8\3\2\2\2\u02b0\u02b1") + buf.write("\7]\2\2\u02b1\u00aa\3\2\2\2\u02b2\u02b3\7_\2\2\u02b3\u00ac") + buf.write("\3\2\2\2\u02b4\u02b5\7/\2\2\u02b5\u02b6\7@\2\2\u02b6\u00ae") + buf.write("\3\2\2\2\u02b7\u02b8\7>\2\2\u02b8\u00b0\3\2\2\2\u02b9") + buf.write("\u02ba\7@\2\2\u02ba\u00b2\3\2\2\2\u02bb\u02bc\7?\2\2\u02bc") + 
buf.write("\u00b4\3\2\2\2\u02bd\u02be\7A\2\2\u02be\u00b6\3\2\2\2") + buf.write("\u02bf\u02c0\7,\2\2\u02c0\u00b8\3\2\2\2\u02c1\u02c2\7") + buf.write("-\2\2\u02c2\u00ba\3\2\2\2\u02c3\u02c4\7-\2\2\u02c4\u02c5") + buf.write("\7?\2\2\u02c5\u00bc\3\2\2\2\u02c6\u02c7\7a\2\2\u02c7\u00be") + buf.write("\3\2\2\2\u02c8\u02c9\7~\2\2\u02c9\u00c0\3\2\2\2\u02ca") + buf.write("\u02cb\7&\2\2\u02cb\u00c2\3\2\2\2\u02cc\u02cd\7.\2\2\u02cd") + buf.write("\u00c4\3\2\2\2\u02ce\u02cf\7=\2\2\u02cf\u00c6\3\2\2\2") + buf.write("\u02d0\u02d1\7\60\2\2\u02d1\u00c8\3\2\2\2\u02d2\u02d3") + buf.write("\7\60\2\2\u02d3\u02d4\7\60\2\2\u02d4\u00ca\3\2\2\2\u02d5") + buf.write("\u02d6\7B\2\2\u02d6\u00cc\3\2\2\2\u02d7\u02d8\7%\2\2\u02d8") + buf.write("\u00ce\3\2\2\2\u02d9\u02da\7\u0080\2\2\u02da\u00d0\3\2") + buf.write("\2\2\u02db\u02dc\5\u00a9R\2\u02dc\u02dd\3\2\2\2\u02dd") + buf.write("\u02de\bf\t\2\u02de\u02df\bf\n\2\u02df\u00d2\3\2\2\2\u02e0") + buf.write("\u02e1\5}<\2\u02e1\u02e2\3\2\2\2\u02e2\u02e3\bg\t\2\u02e3") + buf.write("\u00d4\3\2\2\2\u02e4\u02e5\5\u008dD\2\u02e5\u02e6\3\2") + buf.write("\2\2\u02e6\u02e7\bh\t\2\u02e7\u00d6\3\2\2\2\u02e8\u02e9") + buf.write("\5\u008bC\2\u02e9\u02ea\3\2\2\2\u02ea\u02eb\bi\t\2\u02eb") + buf.write("\u00d8\3\2\2\2\u02ec\u02ed\5\u00abS\2\u02ed\u02ee\bj\13") + buf.write("\2\u02ee\u00da\3\2\2\2\u02ef\u02f0\7\2\2\3\u02f0\u02f1") + buf.write("\3\2\2\2\u02f1\u02f2\bk\f\2\u02f2\u00dc\3\2\2\2\u02f3") + buf.write("\u02f4\13\2\2\2\u02f4\u00de\3\2\2\2\u02f5\u02f6\5\u00a5") + buf.write("P\2\u02f6\u02f7\3\2\2\2\u02f7\u02f8\bm\r\2\u02f8\u02f9") + buf.write("\bm\4\2\u02f9\u00e0\3\2\2\2\u02fa\u02fb\5}<\2\u02fb\u02fc") + buf.write("\3\2\2\2\u02fc\u02fd\bn\r\2\u02fd\u00e2\3\2\2\2\u02fe") + buf.write("\u02ff\5\u008dD\2\u02ff\u0300\3\2\2\2\u0300\u0301\bo\r") + buf.write("\2\u0301\u00e4\3\2\2\2\u0302\u0303\5\u008bC\2\u0303\u0304") + buf.write("\3\2\2\2\u0304\u0305\bp\r\2\u0305\u00e6\3\2\2\2\u0306") + buf.write("\u0307\5w9\2\u0307\u0308\3\2\2\2\u0308\u0309\bq\r\2\u0309") + 
buf.write("\u00e8\3\2\2\2\u030a\u030b\5u8\2\u030b\u030c\3\2\2\2\u030c") + buf.write("\u030d\br\r\2\u030d\u00ea\3\2\2\2\u030e\u030f\5y:\2\u030f") + buf.write("\u0310\3\2\2\2\u0310\u0311\bs\r\2\u0311\u00ec\3\2\2\2") + buf.write("\u0312\u0313\5\u00a7Q\2\u0313\u0314\bt\16\2\u0314\u00ee") + buf.write("\3\2\2\2\u0315\u0316\7\2\2\3\u0316\u0317\3\2\2\2\u0317") + buf.write("\u0318\bu\f\2\u0318\u00f0\3\2\2\2\u0319\u031a\13\2\2\2") + buf.write("\u031a\u00f2\3\2\2\2\u031b\u031c\5w9\2\u031c\u031d\3\2") + buf.write("\2\2\u031d\u031e\bw\17\2\u031e\u031f\bw\2\2\u031f\u00f4") + buf.write("\3\2\2\2\u0320\u0321\5u8\2\u0321\u0322\3\2\2\2\u0322\u0323") + buf.write("\bx\20\2\u0323\u0324\bx\2\2\u0324\u00f6\3\2\2\2\u0325") + buf.write("\u0326\5y:\2\u0326\u0327\3\2\2\2\u0327\u0328\by\21\2\u0328") + buf.write("\u0329\by\2\2\u0329\u00f8\3\2\2\2\u032a\u032b\5\u00a5") + buf.write("P\2\u032b\u032c\3\2\2\2\u032c\u032d\bz\22\2\u032d\u00fa") + buf.write("\3\2\2\2\u032e\u032f\5\u00a7Q\2\u032f\u0330\3\2\2\2\u0330") + buf.write("\u0331\b{\23\2\u0331\u0332\b{\f\2\u0332\u00fc\3\2\2\2") + buf.write("\u0333\u0334\5\u0137\u0099\2\u0334\u0335\3\2\2\2\u0335") + buf.write("\u0336\b|\24\2\u0336\u00fe\3\2\2\2\u0337\u0338\5\u00c7") + buf.write("a\2\u0338\u0339\3\2\2\2\u0339\u033a\b}\25\2\u033a\u0100") + buf.write("\3\2\2\2\u033b\u033c\5\u00b3W\2\u033c\u033d\3\2\2\2\u033d") + buf.write("\u033e\b~\26\2\u033e\u0102\3\2\2\2\u033f\u0340\5\u008b") + buf.write("C\2\u0340\u0341\3\2\2\2\u0341\u0342\b\177\27\2\u0342\u0104") + buf.write("\3\2\2\2\u0343\u0344\5\u0095H\2\u0344\u0345\3\2\2\2\u0345") + buf.write("\u0346\b\u0080\30\2\u0346\u0106\3\2\2\2\u0347\u0348\5") + buf.write("\u00b7Y\2\u0348\u0349\3\2\2\2\u0349\u034a\b\u0081\31\2") + buf.write("\u034a\u0108\3\2\2\2\u034b\u034c\5\u00c5`\2\u034c\u034d") + buf.write("\3\2\2\2\u034d\u034e\b\u0082\32\2\u034e\u010a\3\2\2\2") + buf.write("\u034f\u0351\5o\65\2\u0350\u034f\3\2\2\2\u0351\u0352\3") + buf.write("\2\2\2\u0352\u0350\3\2\2\2\u0352\u0353\3\2\2\2\u0353\u0354") + 
buf.write("\3\2\2\2\u0354\u0355\b\u0083\33\2\u0355\u0356\b\u0083") + buf.write("\2\2\u0356\u010c\3\2\2\2\u0357\u0358\5w9\2\u0358\u0359") + buf.write("\3\2\2\2\u0359\u035a\b\u0084\17\2\u035a\u035b\b\u0084") + buf.write("\2\2\u035b\u010e\3\2\2\2\u035c\u035d\5u8\2\u035d\u035e") + buf.write("\3\2\2\2\u035e\u035f\b\u0085\20\2\u035f\u0360\b\u0085") + buf.write("\2\2\u0360\u0110\3\2\2\2\u0361\u0362\5y:\2\u0362\u0363") + buf.write("\3\2\2\2\u0363\u0364\b\u0086\21\2\u0364\u0365\b\u0086") + buf.write("\2\2\u0365\u0112\3\2\2\2\u0366\u0367\5\u00a5P\2\u0367") + buf.write("\u0368\3\2\2\2\u0368\u0369\b\u0087\22\2\u0369\u0114\3") + buf.write("\2\2\2\u036a\u036b\5\u00a7Q\2\u036b\u036c\3\2\2\2\u036c") + buf.write("\u036d\b\u0088\23\2\u036d\u036e\b\u0088\f\2\u036e\u0116") + buf.write("\3\2\2\2\u036f\u0370\5\u0137\u0099\2\u0370\u0371\3\2\2") + buf.write("\2\u0371\u0372\b\u0089\24\2\u0372\u0118\3\2\2\2\u0373") + buf.write("\u0374\5\u00c7a\2\u0374\u0375\3\2\2\2\u0375\u0376\b\u008a") + buf.write("\25\2\u0376\u011a\3\2\2\2\u0377\u0378\5\u00c3_\2\u0378") + buf.write("\u0379\3\2\2\2\u0379\u037a\b\u008b\34\2\u037a\u011c\3") + buf.write("\2\2\2\u037b\u037d\5o\65\2\u037c\u037b\3\2\2\2\u037d\u037e") + buf.write("\3\2\2\2\u037e\u037c\3\2\2\2\u037e\u037f\3\2\2\2\u037f") + buf.write("\u0380\3\2\2\2\u0380\u0381\b\u008c\33\2\u0381\u0382\b") + buf.write("\u008c\2\2\u0382\u011e\3\2\2\2\u0383\u0384\5w9\2\u0384") + buf.write("\u0385\3\2\2\2\u0385\u0386\b\u008d\17\2\u0386\u0387\b") + buf.write("\u008d\2\2\u0387\u0120\3\2\2\2\u0388\u0389\5u8\2\u0389") + buf.write("\u038a\3\2\2\2\u038a\u038b\b\u008e\20\2\u038b\u038c\b") + buf.write("\u008e\2\2\u038c\u0122\3\2\2\2\u038d\u038e\5y:\2\u038e") + buf.write("\u038f\3\2\2\2\u038f\u0390\b\u008f\21\2\u0390\u0391\b") + buf.write("\u008f\2\2\u0391\u0124\3\2\2\2\u0392\u0393\5\u00a5P\2") + buf.write("\u0393\u0394\3\2\2\2\u0394\u0395\b\u0090\22\2\u0395\u0126") + buf.write("\3\2\2\2\u0396\u0397\5\u00a7Q\2\u0397\u0398\3\2\2\2\u0398") + 
buf.write("\u0399\b\u0091\23\2\u0399\u039a\b\u0091\f\2\u039a\u0128") + buf.write("\3\2\2\2\u039b\u039c\5\u0137\u0099\2\u039c\u039d\3\2\2") + buf.write("\2\u039d\u039e\b\u0092\24\2\u039e\u012a\3\2\2\2\u039f") + buf.write("\u03a0\5\u00c7a\2\u03a0\u03a1\3\2\2\2\u03a1\u03a2\b\u0093") + buf.write("\25\2\u03a2\u012c\3\2\2\2\u03a3\u03a4\5\u00c3_\2\u03a4") + buf.write("\u03a5\3\2\2\2\u03a5\u03a6\b\u0094\34\2\u03a6\u012e\3") + buf.write("\2\2\2\u03a7\u03a9\5o\65\2\u03a8\u03a7\3\2\2\2\u03a9\u03aa") + buf.write("\3\2\2\2\u03aa\u03a8\3\2\2\2\u03aa\u03ab\3\2\2\2\u03ab") + buf.write("\u03ac\3\2\2\2\u03ac\u03ad\b\u0095\33\2\u03ad\u03ae\b") + buf.write("\u0095\2\2\u03ae\u0130\3\2\2\2\u03af\u03b2\n\r\2\2\u03b0") + buf.write("\u03b2\5}<\2\u03b1\u03af\3\2\2\2\u03b1\u03b0\3\2\2\2\u03b2") + buf.write("\u03b3\3\2\2\2\u03b3\u03b1\3\2\2\2\u03b3\u03b4\3\2\2\2") + buf.write("\u03b4\u03b5\3\2\2\2\u03b5\u03b6\b\u0096\35\2\u03b6\u0132") + buf.write("\3\2\2\2\u03b7\u03b8\5\u00abS\2\u03b8\u03b9\3\2\2\2\u03b9") + buf.write("\u03ba\b\u0097\f\2\u03ba\u0134\3\2\2\2\u03bb\u03bc\7\2") + buf.write("\2\3\u03bc\u03bd\3\2\2\2\u03bd\u03be\b\u0098\f\2\u03be") + buf.write("\u0136\3\2\2\2\u03bf\u03c3\5\u0093G\2\u03c0\u03c2\5\u0091") + buf.write("F\2\u03c1\u03c0\3\2\2\2\u03c2\u03c5\3\2\2\2\u03c3\u03c1") + buf.write("\3\2\2\2\u03c3\u03c4\3\2\2\2\u03c4\u0138\3\2\2\2\u03c5") + buf.write("\u03c3\3\2\2\2&\2\3\4\5\6\7\b\u0208\u0212\u021e\u0224") + buf.write("\u022d\u0233\u023b\u0243\u024d\u024f\u0251\u0253\u025a") + buf.write("\u025d\u026c\u0271\u0278\u027a\u0282\u0284\u028c\u028e") + buf.write("\u0295\u0352\u037e\u03aa\u03b1\u03b3\u03c3\36\2\4\2\3") + buf.write("\b\2\7\4\2\7\5\2\7\6\2\7\7\2\2\3\2\t;\2\7\3\2\3j\3\6\2") + buf.write("\2\t>\2\3t\4\t\6\2\t\7\2\t\b\2\t%\2\t&\2\t\66\2\t\62\2") + buf.write("\t*\2\t\n\2\t\t\2\t,\2\t\"\2\t\67\2\t!\2\5\2\2") + return buf.getvalue() + + +class ANTLRv4Lexer(LexerAdaptor): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in 
enumerate(atn.decisionToState) ] + + Argument = 1 + Action = 2 + Options = 3 + Tokens = 4 + Channels = 5 + LexerCharSet = 6 + + TOKEN_REF = 1 + RULE_REF = 2 + LEXER_CHAR_SET = 3 + DOC_COMMENT = 4 + BLOCK_COMMENT = 5 + LINE_COMMENT = 6 + INT = 7 + STRING_LITERAL = 8 + UNTERMINATED_STRING_LITERAL = 9 + BEGIN_ARGUMENT = 10 + BEGIN_ACTION = 11 + OPTIONS = 12 + TOKENS = 13 + CHANNELS = 14 + IMPORT = 15 + FRAGMENT = 16 + LEXER = 17 + PARSER = 18 + GRAMMAR = 19 + PROTECTED = 20 + PUBLIC = 21 + PRIVATE = 22 + RETURNS = 23 + LOCALS = 24 + THROWS = 25 + CATCH = 26 + FINALLY = 27 + MODE = 28 + COLON = 29 + COLONCOLON = 30 + COMMA = 31 + SEMI = 32 + LPAREN = 33 + RPAREN = 34 + LBRACE = 35 + RBRACE = 36 + RARROW = 37 + LT = 38 + GT = 39 + ASSIGN = 40 + QUESTION = 41 + STAR = 42 + PLUS_ASSIGN = 43 + PLUS = 44 + OR = 45 + DOLLAR = 46 + RANGE = 47 + DOT = 48 + AT = 49 + POUND = 50 + NOT = 51 + ID = 52 + WS = 53 + ERRCHAR = 54 + END_ARGUMENT = 55 + UNTERMINATED_ARGUMENT = 56 + ARGUMENT_CONTENT = 57 + END_ACTION = 58 + UNTERMINATED_ACTION = 59 + ACTION_CONTENT = 60 + UNTERMINATED_CHAR_SET = 61 + + modeNames = [ u"DEFAULT_MODE", u"Argument", u"Action", u"Options", u"Tokens", + u"Channels", u"LexerCharSet" ] + + literalNames = [ u"", + "'options'", "'tokens'", "'channels'", "'import'", "'fragment'", + "'lexer'", "'parser'", "'grammar'", "'protected'", "'public'", + "'private'", "'returns'", "'locals'", "'throws'", "'catch'", + "'finally'", "'mode'" ] + + symbolicNames = [ u"", + "TOKEN_REF", "RULE_REF", "LEXER_CHAR_SET", "DOC_COMMENT", "BLOCK_COMMENT", + "LINE_COMMENT", "INT", "STRING_LITERAL", "UNTERMINATED_STRING_LITERAL", + "BEGIN_ARGUMENT", "BEGIN_ACTION", "OPTIONS", "TOKENS", "CHANNELS", + "IMPORT", "FRAGMENT", "LEXER", "PARSER", "GRAMMAR", "PROTECTED", + "PUBLIC", "PRIVATE", "RETURNS", "LOCALS", "THROWS", "CATCH", + "FINALLY", "MODE", "COLON", "COLONCOLON", "COMMA", "SEMI", "LPAREN", + "RPAREN", "LBRACE", "RBRACE", "RARROW", "LT", "GT", "ASSIGN", + "QUESTION", "STAR", 
"PLUS_ASSIGN", "PLUS", "OR", "DOLLAR", "RANGE", + "DOT", "AT", "POUND", "NOT", "ID", "WS", "ERRCHAR", "END_ARGUMENT", + "UNTERMINATED_ARGUMENT", "ARGUMENT_CONTENT", "END_ACTION", "UNTERMINATED_ACTION", + "ACTION_CONTENT", "UNTERMINATED_CHAR_SET" ] + + ruleNames = [ "DOC_COMMENT", "BLOCK_COMMENT", "LINE_COMMENT", "INT", + "STRING_LITERAL", "UNTERMINATED_STRING_LITERAL", "BEGIN_ARGUMENT", + "BEGIN_ACTION", "OPTIONS", "TOKENS", "CHANNELS", "IMPORT", + "FRAGMENT", "LEXER", "PARSER", "GRAMMAR", "PROTECTED", + "PUBLIC", "PRIVATE", "RETURNS", "LOCALS", "THROWS", "CATCH", + "FINALLY", "MODE", "COLON", "COLONCOLON", "COMMA", "SEMI", + "LPAREN", "RPAREN", "LBRACE", "RBRACE", "RARROW", "LT", + "GT", "ASSIGN", "QUESTION", "STAR", "PLUS_ASSIGN", "PLUS", + "OR", "DOLLAR", "RANGE", "DOT", "AT", "POUND", "NOT", + "ID", "WS", "ERRCHAR", "Ws", "Hws", "Vws", "BlockComment", + "DocComment", "LineComment", "EscSeq", "EscAny", "UnicodeEsc", + "DecimalNumeral", "HexDigit", "DecDigit", "BoolLiteral", + "CharLiteral", "SQuoteLiteral", "DQuoteLiteral", "USQuoteLiteral", + "NameChar", "NameStartChar", "Int", "Esc", "Colon", "DColon", + "SQuote", "DQuote", "LParen", "RParen", "LBrace", "RBrace", + "LBrack", "RBrack", "RArrow", "Lt", "Gt", "Equal", "Question", + "Star", "Plus", "PlusAssign", "Underscore", "Pipe", "Dollar", + "Comma", "Semi", "Dot", "Range", "At", "Pound", "Tilde", + "NESTED_ARGUMENT", "ARGUMENT_ESCAPE", "ARGUMENT_STRING_LITERAL", + "ARGUMENT_CHAR_LITERAL", "END_ARGUMENT", "UNTERMINATED_ARGUMENT", + "ARGUMENT_CONTENT", "NESTED_ACTION", "ACTION_ESCAPE", + "ACTION_STRING_LITERAL", "ACTION_CHAR_LITERAL", "ACTION_DOC_COMMENT", + "ACTION_BLOCK_COMMENT", "ACTION_LINE_COMMENT", "END_ACTION", + "UNTERMINATED_ACTION", "ACTION_CONTENT", "OPT_DOC_COMMENT", + "OPT_BLOCK_COMMENT", "OPT_LINE_COMMENT", "OPT_LBRACE", + "OPT_RBRACE", "OPT_ID", "OPT_DOT", "OPT_ASSIGN", "OPT_STRING_LITERAL", + "OPT_INT", "OPT_STAR", "OPT_SEMI", "OPT_WS", "TOK_DOC_COMMENT", + "TOK_BLOCK_COMMENT", 
"TOK_LINE_COMMENT", "TOK_LBRACE", + "TOK_RBRACE", "TOK_ID", "TOK_DOT", "TOK_COMMA", "TOK_WS", + "CHN_DOC_COMMENT", "CHN_BLOCK_COMMENT", "CHN_LINE_COMMENT", + "CHN_LBRACE", "CHN_RBRACE", "CHN_ID", "CHN_DOT", "CHN_COMMA", + "CHN_WS", "LEXER_CHAR_SET_BODY", "LEXER_CHAR_SET", "UNTERMINATED_CHAR_SET", + "Id" ] + + grammarFileName = "ANTLRv4Lexer.g4" + + def __init__(self, input=None): + super().__init__(input) + self.checkVersion("4.5") + self._interp = LexerATNSimulator(self, self.atn, self.decisionsToDFA, PredictionContextCache()) + self._actions = None + self._predicates = None + + + def action(self, localctx:RuleContext, ruleIndex:int, actionIndex:int): + if self._actions is None: + actions = dict() + actions[6] = self.BEGIN_ARGUMENT_action + actions[104] = self.END_ARGUMENT_action + actions[114] = self.END_ACTION_action + self._actions = actions + action = self._actions.get(ruleIndex, None) + if action is not None: + action(localctx, actionIndex) + else: + raise Exception("No registered action for:" + str(ruleIndex)) + + def BEGIN_ARGUMENT_action(self, localctx:RuleContext , actionIndex:int): + if actionIndex == 0: + self.handleBeginArgument() + + + def END_ARGUMENT_action(self, localctx:RuleContext , actionIndex:int): + if actionIndex == 1: + self.handleEndArgument() + + + def END_ACTION_action(self, localctx:RuleContext , actionIndex:int): + if actionIndex == 2: + self.handleEndAction() + + + diff --git a/tools/grammar-analysis/ANTLRv4Parser.py b/tools/grammar-analysis/ANTLRv4Parser.py new file mode 100644 index 00000000..ddd4e7a1 --- /dev/null +++ b/tools/grammar-analysis/ANTLRv4Parser.py @@ -0,0 +1,4765 @@ +# Generated from java-escape by ANTLR 4.5 +# encoding: utf-8 +from antlr4 import * +from io import StringIO +package = globals().get("__package__", None) +ischild = len(package)>0 if package is not None else False +if ischild: + from .ANTLRv4ParserListener import ANTLRv4ParserListener +else: + from ANTLRv4ParserListener import ANTLRv4ParserListener +def 
serializedATN(): + with StringIO() as buf: + buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\3?") + buf.write("\u0283\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7\t\7") + buf.write("\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r\4\16") + buf.write("\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23\t\23") + buf.write("\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30\4\31") + buf.write("\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36\t\36") + buf.write("\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%\4&\t") + buf.write("&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.\t.\4") + buf.write("/\t/\4\60\t\60\4\61\t\61\4\62\t\62\4\63\t\63\4\64\t\64") + buf.write("\4\65\t\65\4\66\t\66\4\67\t\67\48\t8\49\t9\4:\t:\4;\t") + buf.write(";\4<\t<\4=\t=\4>\t>\4?\t?\4@\t@\3\2\7\2\u0082\n\2\f\2") + buf.write("\16\2\u0085\13\2\3\2\3\2\3\2\3\2\7\2\u008b\n\2\f\2\16") + buf.write("\2\u008e\13\2\3\2\3\2\7\2\u0092\n\2\f\2\16\2\u0095\13") + buf.write("\2\3\2\3\2\3\3\3\3\3\3\3\3\3\3\5\3\u009e\n\3\3\4\3\4\3") + buf.write("\4\3\4\3\4\5\4\u00a5\n\4\3\5\3\5\3\5\3\5\3\5\7\5\u00ac") + buf.write("\n\5\f\5\16\5\u00af\13\5\3\5\3\5\3\6\3\6\3\6\3\6\3\7\3") + buf.write("\7\3\7\7\7\u00ba\n\7\f\7\16\7\u00bd\13\7\3\7\3\7\3\7\5") + buf.write("\7\u00c2\n\7\3\b\3\b\3\b\3\b\7\b\u00c8\n\b\f\b\16\b\u00cb") + buf.write("\13\b\3\b\3\b\3\t\3\t\3\t\3\t\3\t\5\t\u00d4\n\t\3\n\3") + buf.write("\n\3\n\5\n\u00d9\n\n\3\n\3\n\3\13\3\13\3\13\5\13\u00e0") + buf.write("\n\13\3\13\3\13\3\f\3\f\3\f\7\f\u00e7\n\f\f\f\16\f\u00ea") + buf.write("\13\f\3\f\5\f\u00ed\n\f\3\r\3\r\3\r\3\r\5\r\u00f3\n\r") + buf.write("\3\r\3\r\3\r\3\16\3\16\3\16\5\16\u00fb\n\16\3\17\3\17") + buf.write("\7\17\u00ff\n\17\f\17\16\17\u0102\13\17\3\17\3\17\3\20") + buf.write("\3\20\7\20\u0108\n\20\f\20\16\20\u010b\13\20\3\20\3\20") + buf.write("\3\21\3\21\3\21\3\21\7\21\u0113\n\21\f\21\16\21\u0116") + buf.write("\13\21\3\22\7\22\u0119\n\22\f\22\16\22\u011c\13\22\3\23") + 
buf.write("\3\23\5\23\u0120\n\23\3\24\7\24\u0123\n\24\f\24\16\24") + buf.write("\u0126\13\24\3\24\5\24\u0129\n\24\3\24\3\24\5\24\u012d") + buf.write("\n\24\3\24\5\24\u0130\n\24\3\24\5\24\u0133\n\24\3\24\5") + buf.write("\24\u0136\n\24\3\24\7\24\u0139\n\24\f\24\16\24\u013c\13") + buf.write("\24\3\24\3\24\3\24\3\24\3\24\3\25\7\25\u0144\n\25\f\25") + buf.write("\16\25\u0147\13\25\3\25\5\25\u014a\n\25\3\26\3\26\3\26") + buf.write("\3\26\3\27\3\27\3\27\3\30\3\30\5\30\u0155\n\30\3\31\3") + buf.write("\31\3\31\3\32\3\32\3\32\3\32\7\32\u015e\n\32\f\32\16\32") + buf.write("\u0161\13\32\3\33\3\33\3\33\3\34\3\34\3\34\3\34\3\35\6") + buf.write("\35\u016b\n\35\r\35\16\35\u016c\3\36\3\36\3\37\3\37\3") + buf.write(" \3 \3 \7 \u0176\n \f \16 \u0179\13 \3!\3!\3!\5!\u017e") + buf.write("\n!\3\"\7\"\u0181\n\"\f\"\16\"\u0184\13\"\3\"\5\"\u0187") + buf.write("\n\"\3\"\3\"\3\"\3\"\3\"\3#\3#\3$\3$\3$\7$\u0193\n$\f") + buf.write("$\16$\u0196\13$\3%\3%\5%\u019a\n%\3%\5%\u019d\n%\3&\6") + buf.write("&\u01a0\n&\r&\16&\u01a1\3\'\3\'\5\'\u01a6\n\'\3\'\3\'") + buf.write("\5\'\u01aa\n\'\3\'\3\'\5\'\u01ae\n\'\3\'\3\'\5\'\u01b2") + buf.write("\n\'\5\'\u01b4\n\'\3(\3(\3(\3(\5(\u01ba\n(\3)\3)\3)\3") + buf.write(")\3*\3*\3*\3*\7*\u01c4\n*\f*\16*\u01c7\13*\3+\3+\3+\3") + buf.write("+\3+\3+\5+\u01cf\n+\3,\3,\5,\u01d3\n,\3-\3-\5-\u01d7\n") + buf.write("-\3.\3.\3.\7.\u01dc\n.\f.\16.\u01df\13.\3/\5/\u01e2\n") + buf.write("/\3/\6/\u01e5\n/\r/\16/\u01e6\3/\5/\u01ea\n/\3\60\3\60") + buf.write("\3\60\5\60\u01ef\n\60\3\60\3\60\3\60\5\60\u01f4\n\60\3") + buf.write("\60\3\60\3\60\5\60\u01f9\n\60\5\60\u01fb\n\60\3\61\3\61") + buf.write("\3\61\3\61\5\61\u0201\n\61\3\62\3\62\5\62\u0205\n\62\3") + buf.write("\63\3\63\3\64\3\64\5\64\u020b\n\64\3\64\3\64\5\64\u020f") + buf.write("\n\64\3\64\3\64\5\64\u0213\n\64\5\64\u0215\n\64\3\65\3") + buf.write("\65\3\65\3\65\3\65\3\65\5\65\u021d\n\65\5\65\u021f\n\65") + buf.write("\3\66\3\66\3\66\3\66\3\66\3\66\5\66\u0227\n\66\5\66\u0229") + 
buf.write("\n\66\3\67\3\67\3\67\3\67\5\67\u022f\n\67\38\38\38\38") + buf.write("\78\u0235\n8\f8\168\u0238\138\38\38\39\39\59\u023e\n9") + buf.write("\39\39\59\u0242\n9\39\39\59\u0246\n9\3:\3:\5:\u024a\n") + buf.write(":\3:\7:\u024d\n:\f:\16:\u0250\13:\3:\5:\u0253\n:\3:\3") + buf.write(":\3:\3;\3;\5;\u025a\n;\3;\5;\u025d\n;\3<\3<\3<\3<\3=\3") + buf.write("=\5=\u0265\n=\3=\3=\5=\u0269\n=\5=\u026b\n=\3>\3>\3>\3") + buf.write(">\7>\u0271\n>\f>\16>\u0274\13>\3>\3>\3?\3?\3?\3?\3?\5") + buf.write("?\u027d\n?\5?\u027f\n?\3@\3@\3@\2\2A\2\4\6\b\n\f\16\20") + buf.write("\22\24\26\30\32\34\36 \"$&(*,.\60\62\64\668:<>@BDFHJL") + buf.write("NPRTVXZ\\^`bdfhjlnprtvxz|~\2\5\4\2\22\22\26\30\4\2**-") + buf.write("-\3\2\3\4\u02ac\2\u0083\3\2\2\2\4\u009d\3\2\2\2\6\u00a4") + buf.write("\3\2\2\2\b\u00a6\3\2\2\2\n\u00b2\3\2\2\2\f\u00c1\3\2\2") + buf.write("\2\16\u00c3\3\2\2\2\20\u00d3\3\2\2\2\22\u00d5\3\2\2\2") + buf.write("\24\u00dc\3\2\2\2\26\u00e3\3\2\2\2\30\u00ee\3\2\2\2\32") + buf.write("\u00fa\3\2\2\2\34\u00fc\3\2\2\2\36\u0105\3\2\2\2 \u010e") + buf.write("\3\2\2\2\"\u011a\3\2\2\2$\u011f\3\2\2\2&\u0124\3\2\2\2") + buf.write("(\u0145\3\2\2\2*\u014b\3\2\2\2,\u014f\3\2\2\2.\u0154\3") + buf.write("\2\2\2\60\u0156\3\2\2\2\62\u0159\3\2\2\2\64\u0162\3\2") + buf.write("\2\2\66\u0165\3\2\2\28\u016a\3\2\2\2:\u016e\3\2\2\2<\u0170") + buf.write("\3\2\2\2>\u0172\3\2\2\2@\u017a\3\2\2\2B\u0182\3\2\2\2") + buf.write("D\u018d\3\2\2\2F\u018f\3\2\2\2H\u019c\3\2\2\2J\u019f\3") + buf.write("\2\2\2L\u01b3\3\2\2\2N\u01b5\3\2\2\2P\u01bb\3\2\2\2R\u01bf") + buf.write("\3\2\2\2T\u01ce\3\2\2\2V\u01d2\3\2\2\2X\u01d6\3\2\2\2") + buf.write("Z\u01d8\3\2\2\2\\\u01e9\3\2\2\2^\u01fa\3\2\2\2`\u01fc") + buf.write("\3\2\2\2b\u0202\3\2\2\2d\u0206\3\2\2\2f\u0214\3\2\2\2") + buf.write("h\u021e\3\2\2\2j\u0228\3\2\2\2l\u022e\3\2\2\2n\u0230\3") + buf.write("\2\2\2p\u0245\3\2\2\2r\u0247\3\2\2\2t\u0257\3\2\2\2v\u025e") + buf.write("\3\2\2\2x\u026a\3\2\2\2z\u026c\3\2\2\2|\u027e\3\2\2\2") + 
buf.write("~\u0280\3\2\2\2\u0080\u0082\7\6\2\2\u0081\u0080\3\2\2") + buf.write("\2\u0082\u0085\3\2\2\2\u0083\u0081\3\2\2\2\u0083\u0084") + buf.write("\3\2\2\2\u0084\u0086\3\2\2\2\u0085\u0083\3\2\2\2\u0086") + buf.write("\u0087\5\4\3\2\u0087\u0088\5~@\2\u0088\u008c\7\"\2\2\u0089") + buf.write("\u008b\5\6\4\2\u008a\u0089\3\2\2\2\u008b\u008e\3\2\2\2") + buf.write("\u008c\u008a\3\2\2\2\u008c\u008d\3\2\2\2\u008d\u008f\3") + buf.write("\2\2\2\u008e\u008c\3\2\2\2\u008f\u0093\5\"\22\2\u0090") + buf.write("\u0092\5 \21\2\u0091\u0090\3\2\2\2\u0092\u0095\3\2\2\2") + buf.write("\u0093\u0091\3\2\2\2\u0093\u0094\3\2\2\2\u0094\u0096\3") + buf.write("\2\2\2\u0095\u0093\3\2\2\2\u0096\u0097\7\2\2\3\u0097\3") + buf.write("\3\2\2\2\u0098\u0099\7\23\2\2\u0099\u009e\7\25\2\2\u009a") + buf.write("\u009b\7\24\2\2\u009b\u009e\7\25\2\2\u009c\u009e\7\25") + buf.write("\2\2\u009d\u0098\3\2\2\2\u009d\u009a\3\2\2\2\u009d\u009c") + buf.write("\3\2\2\2\u009e\5\3\2\2\2\u009f\u00a5\5\b\5\2\u00a0\u00a5") + buf.write("\5\16\b\2\u00a1\u00a5\5\22\n\2\u00a2\u00a5\5\24\13\2\u00a3") + buf.write("\u00a5\5\30\r\2\u00a4\u009f\3\2\2\2\u00a4\u00a0\3\2\2") + buf.write("\2\u00a4\u00a1\3\2\2\2\u00a4\u00a2\3\2\2\2\u00a4\u00a3") + buf.write("\3\2\2\2\u00a5\7\3\2\2\2\u00a6\u00a7\7\16\2\2\u00a7\u00ad") + buf.write("\7%\2\2\u00a8\u00a9\5\n\6\2\u00a9\u00aa\7\"\2\2\u00aa") + buf.write("\u00ac\3\2\2\2\u00ab\u00a8\3\2\2\2\u00ac\u00af\3\2\2\2") + buf.write("\u00ad\u00ab\3\2\2\2\u00ad\u00ae\3\2\2\2\u00ae\u00b0\3") + buf.write("\2\2\2\u00af\u00ad\3\2\2\2\u00b0\u00b1\7&\2\2\u00b1\t") + buf.write("\3\2\2\2\u00b2\u00b3\5~@\2\u00b3\u00b4\7*\2\2\u00b4\u00b5") + buf.write("\5\f\7\2\u00b5\13\3\2\2\2\u00b6\u00bb\5~@\2\u00b7\u00b8") + buf.write("\7\62\2\2\u00b8\u00ba\5~@\2\u00b9\u00b7\3\2\2\2\u00ba") + buf.write("\u00bd\3\2\2\2\u00bb\u00b9\3\2\2\2\u00bb\u00bc\3\2\2\2") + buf.write("\u00bc\u00c2\3\2\2\2\u00bd\u00bb\3\2\2\2\u00be\u00c2\7") + buf.write("\n\2\2\u00bf\u00c2\5\34\17\2\u00c0\u00c2\7\t\2\2\u00c1") + 
buf.write("\u00b6\3\2\2\2\u00c1\u00be\3\2\2\2\u00c1\u00bf\3\2\2\2") + buf.write("\u00c1\u00c0\3\2\2\2\u00c2\r\3\2\2\2\u00c3\u00c4\7\21") + buf.write("\2\2\u00c4\u00c9\5\20\t\2\u00c5\u00c6\7!\2\2\u00c6\u00c8") + buf.write("\5\20\t\2\u00c7\u00c5\3\2\2\2\u00c8\u00cb\3\2\2\2\u00c9") + buf.write("\u00c7\3\2\2\2\u00c9\u00ca\3\2\2\2\u00ca\u00cc\3\2\2\2") + buf.write("\u00cb\u00c9\3\2\2\2\u00cc\u00cd\7\"\2\2\u00cd\17\3\2") + buf.write("\2\2\u00ce\u00cf\5~@\2\u00cf\u00d0\7*\2\2\u00d0\u00d1") + buf.write("\5~@\2\u00d1\u00d4\3\2\2\2\u00d2\u00d4\5~@\2\u00d3\u00ce") + buf.write("\3\2\2\2\u00d3\u00d2\3\2\2\2\u00d4\21\3\2\2\2\u00d5\u00d6") + buf.write("\7\17\2\2\u00d6\u00d8\7%\2\2\u00d7\u00d9\5\26\f\2\u00d8") + buf.write("\u00d7\3\2\2\2\u00d8\u00d9\3\2\2\2\u00d9\u00da\3\2\2\2") + buf.write("\u00da\u00db\7&\2\2\u00db\23\3\2\2\2\u00dc\u00dd\7\20") + buf.write("\2\2\u00dd\u00df\7%\2\2\u00de\u00e0\5\26\f\2\u00df\u00de") + buf.write("\3\2\2\2\u00df\u00e0\3\2\2\2\u00e0\u00e1\3\2\2\2\u00e1") + buf.write("\u00e2\7&\2\2\u00e2\25\3\2\2\2\u00e3\u00e8\5~@\2\u00e4") + buf.write("\u00e5\7!\2\2\u00e5\u00e7\5~@\2\u00e6\u00e4\3\2\2\2\u00e7") + buf.write("\u00ea\3\2\2\2\u00e8\u00e6\3\2\2\2\u00e8\u00e9\3\2\2\2") + buf.write("\u00e9\u00ec\3\2\2\2\u00ea\u00e8\3\2\2\2\u00eb\u00ed\7") + buf.write("!\2\2\u00ec\u00eb\3\2\2\2\u00ec\u00ed\3\2\2\2\u00ed\27") + buf.write("\3\2\2\2\u00ee\u00f2\7\63\2\2\u00ef\u00f0\5\32\16\2\u00f0") + buf.write("\u00f1\7 \2\2\u00f1\u00f3\3\2\2\2\u00f2\u00ef\3\2\2\2") + buf.write("\u00f2\u00f3\3\2\2\2\u00f3\u00f4\3\2\2\2\u00f4\u00f5\5") + buf.write("~@\2\u00f5\u00f6\5\34\17\2\u00f6\31\3\2\2\2\u00f7\u00fb") + buf.write("\5~@\2\u00f8\u00fb\7\23\2\2\u00f9\u00fb\7\24\2\2\u00fa") + buf.write("\u00f7\3\2\2\2\u00fa\u00f8\3\2\2\2\u00fa\u00f9\3\2\2\2") + buf.write("\u00fb\33\3\2\2\2\u00fc\u0100\7\r\2\2\u00fd\u00ff\7>\2") + buf.write("\2\u00fe\u00fd\3\2\2\2\u00ff\u0102\3\2\2\2\u0100\u00fe") + buf.write("\3\2\2\2\u0100\u0101\3\2\2\2\u0101\u0103\3\2\2\2\u0102") + 
buf.write("\u0100\3\2\2\2\u0103\u0104\7<\2\2\u0104\35\3\2\2\2\u0105") + buf.write("\u0109\7\f\2\2\u0106\u0108\7;\2\2\u0107\u0106\3\2\2\2") + buf.write("\u0108\u010b\3\2\2\2\u0109\u0107\3\2\2\2\u0109\u010a\3") + buf.write("\2\2\2\u010a\u010c\3\2\2\2\u010b\u0109\3\2\2\2\u010c\u010d") + buf.write("\79\2\2\u010d\37\3\2\2\2\u010e\u010f\7\36\2\2\u010f\u0110") + buf.write("\5~@\2\u0110\u0114\7\"\2\2\u0111\u0113\5B\"\2\u0112\u0111") + buf.write("\3\2\2\2\u0113\u0116\3\2\2\2\u0114\u0112\3\2\2\2\u0114") + buf.write("\u0115\3\2\2\2\u0115!\3\2\2\2\u0116\u0114\3\2\2\2\u0117") + buf.write("\u0119\5$\23\2\u0118\u0117\3\2\2\2\u0119\u011c\3\2\2\2") + buf.write("\u011a\u0118\3\2\2\2\u011a\u011b\3\2\2\2\u011b#\3\2\2") + buf.write("\2\u011c\u011a\3\2\2\2\u011d\u0120\5&\24\2\u011e\u0120") + buf.write("\5B\"\2\u011f\u011d\3\2\2\2\u011f\u011e\3\2\2\2\u0120") + buf.write("%\3\2\2\2\u0121\u0123\7\6\2\2\u0122\u0121\3\2\2\2\u0123") + buf.write("\u0126\3\2\2\2\u0124\u0122\3\2\2\2\u0124\u0125\3\2\2\2") + buf.write("\u0125\u0128\3\2\2\2\u0126\u0124\3\2\2\2\u0127\u0129\5") + buf.write("8\35\2\u0128\u0127\3\2\2\2\u0128\u0129\3\2\2\2\u0129\u012a") + buf.write("\3\2\2\2\u012a\u012c\7\4\2\2\u012b\u012d\5\36\20\2\u012c") + buf.write("\u012b\3\2\2\2\u012c\u012d\3\2\2\2\u012d\u012f\3\2\2\2") + buf.write("\u012e\u0130\5\60\31\2\u012f\u012e\3\2\2\2\u012f\u0130") + buf.write("\3\2\2\2\u0130\u0132\3\2\2\2\u0131\u0133\5\62\32\2\u0132") + buf.write("\u0131\3\2\2\2\u0132\u0133\3\2\2\2\u0133\u0135\3\2\2\2") + buf.write("\u0134\u0136\5\64\33\2\u0135\u0134\3\2\2\2\u0135\u0136") + buf.write("\3\2\2\2\u0136\u013a\3\2\2\2\u0137\u0139\5.\30\2\u0138") + buf.write("\u0137\3\2\2\2\u0139\u013c\3\2\2\2\u013a\u0138\3\2\2\2") + buf.write("\u013a\u013b\3\2\2\2\u013b\u013d\3\2\2\2\u013c\u013a\3") + buf.write("\2\2\2\u013d\u013e\7\37\2\2\u013e\u013f\5<\37\2\u013f") + buf.write("\u0140\7\"\2\2\u0140\u0141\5(\25\2\u0141\'\3\2\2\2\u0142") + buf.write("\u0144\5*\26\2\u0143\u0142\3\2\2\2\u0144\u0147\3\2\2\2") + 
buf.write("\u0145\u0143\3\2\2\2\u0145\u0146\3\2\2\2\u0146\u0149\3") + buf.write("\2\2\2\u0147\u0145\3\2\2\2\u0148\u014a\5,\27\2\u0149\u0148") + buf.write("\3\2\2\2\u0149\u014a\3\2\2\2\u014a)\3\2\2\2\u014b\u014c") + buf.write("\7\34\2\2\u014c\u014d\5\36\20\2\u014d\u014e\5\34\17\2") + buf.write("\u014e+\3\2\2\2\u014f\u0150\7\35\2\2\u0150\u0151\5\34") + buf.write("\17\2\u0151-\3\2\2\2\u0152\u0155\5\b\5\2\u0153\u0155\5") + buf.write("\66\34\2\u0154\u0152\3\2\2\2\u0154\u0153\3\2\2\2\u0155") + buf.write("/\3\2\2\2\u0156\u0157\7\31\2\2\u0157\u0158\5\36\20\2\u0158") + buf.write("\61\3\2\2\2\u0159\u015a\7\33\2\2\u015a\u015f\5~@\2\u015b") + buf.write("\u015c\7!\2\2\u015c\u015e\5~@\2\u015d\u015b\3\2\2\2\u015e") + buf.write("\u0161\3\2\2\2\u015f\u015d\3\2\2\2\u015f\u0160\3\2\2\2") + buf.write("\u0160\63\3\2\2\2\u0161\u015f\3\2\2\2\u0162\u0163\7\32") + buf.write("\2\2\u0163\u0164\5\36\20\2\u0164\65\3\2\2\2\u0165\u0166") + buf.write("\7\63\2\2\u0166\u0167\5~@\2\u0167\u0168\5\34\17\2\u0168") + buf.write("\67\3\2\2\2\u0169\u016b\5:\36\2\u016a\u0169\3\2\2\2\u016b") + buf.write("\u016c\3\2\2\2\u016c\u016a\3\2\2\2\u016c\u016d\3\2\2\2") + buf.write("\u016d9\3\2\2\2\u016e\u016f\t\2\2\2\u016f;\3\2\2\2\u0170") + buf.write("\u0171\5> \2\u0171=\3\2\2\2\u0172\u0177\5@!\2\u0173\u0174") + buf.write("\7/\2\2\u0174\u0176\5@!\2\u0175\u0173\3\2\2\2\u0176\u0179") + buf.write("\3\2\2\2\u0177\u0175\3\2\2\2\u0177\u0178\3\2\2\2\u0178") + buf.write("?\3\2\2\2\u0179\u0177\3\2\2\2\u017a\u017d\5\\/\2\u017b") + buf.write("\u017c\7\64\2\2\u017c\u017e\5~@\2\u017d\u017b\3\2\2\2") + buf.write("\u017d\u017e\3\2\2\2\u017eA\3\2\2\2\u017f\u0181\7\6\2") + buf.write("\2\u0180\u017f\3\2\2\2\u0181\u0184\3\2\2\2\u0182\u0180") + buf.write("\3\2\2\2\u0182\u0183\3\2\2\2\u0183\u0186\3\2\2\2\u0184") + buf.write("\u0182\3\2\2\2\u0185\u0187\7\22\2\2\u0186\u0185\3\2\2") + buf.write("\2\u0186\u0187\3\2\2\2\u0187\u0188\3\2\2\2\u0188\u0189") + buf.write("\7\3\2\2\u0189\u018a\7\37\2\2\u018a\u018b\5D#\2\u018b") + 
buf.write("\u018c\7\"\2\2\u018cC\3\2\2\2\u018d\u018e\5F$\2\u018e") + buf.write("E\3\2\2\2\u018f\u0194\5H%\2\u0190\u0191\7/\2\2\u0191\u0193") + buf.write("\5H%\2\u0192\u0190\3\2\2\2\u0193\u0196\3\2\2\2\u0194\u0192") + buf.write("\3\2\2\2\u0194\u0195\3\2\2\2\u0195G\3\2\2\2\u0196\u0194") + buf.write("\3\2\2\2\u0197\u0199\5J&\2\u0198\u019a\5R*\2\u0199\u0198") + buf.write("\3\2\2\2\u0199\u019a\3\2\2\2\u019a\u019d\3\2\2\2\u019b") + buf.write("\u019d\3\2\2\2\u019c\u0197\3\2\2\2\u019c\u019b\3\2\2\2") + buf.write("\u019dI\3\2\2\2\u019e\u01a0\5L\'\2\u019f\u019e\3\2\2\2") + buf.write("\u01a0\u01a1\3\2\2\2\u01a1\u019f\3\2\2\2\u01a1\u01a2\3") + buf.write("\2\2\2\u01a2K\3\2\2\2\u01a3\u01a5\5N(\2\u01a4\u01a6\5") + buf.write("f\64\2\u01a5\u01a4\3\2\2\2\u01a5\u01a6\3\2\2\2\u01a6\u01b4") + buf.write("\3\2\2\2\u01a7\u01a9\5h\65\2\u01a8\u01aa\5f\64\2\u01a9") + buf.write("\u01a8\3\2\2\2\u01a9\u01aa\3\2\2\2\u01aa\u01b4\3\2\2\2") + buf.write("\u01ab\u01ad\5P)\2\u01ac\u01ae\5f\64\2\u01ad\u01ac\3\2") + buf.write("\2\2\u01ad\u01ae\3\2\2\2\u01ae\u01b4\3\2\2\2\u01af\u01b1") + buf.write("\5\34\17\2\u01b0\u01b2\7+\2\2\u01b1\u01b0\3\2\2\2\u01b1") + buf.write("\u01b2\3\2\2\2\u01b2\u01b4\3\2\2\2\u01b3\u01a3\3\2\2\2") + buf.write("\u01b3\u01a7\3\2\2\2\u01b3\u01ab\3\2\2\2\u01b3\u01af\3") + buf.write("\2\2\2\u01b4M\3\2\2\2\u01b5\u01b6\5~@\2\u01b6\u01b9\t") + buf.write("\3\2\2\u01b7\u01ba\5h\65\2\u01b8\u01ba\5r:\2\u01b9\u01b7") + buf.write("\3\2\2\2\u01b9\u01b8\3\2\2\2\u01baO\3\2\2\2\u01bb\u01bc") + buf.write("\7#\2\2\u01bc\u01bd\5F$\2\u01bd\u01be\7$\2\2\u01beQ\3") + buf.write("\2\2\2\u01bf\u01c0\7\'\2\2\u01c0\u01c5\5T+\2\u01c1\u01c2") + buf.write("\7!\2\2\u01c2\u01c4\5T+\2\u01c3\u01c1\3\2\2\2\u01c4\u01c7") + buf.write("\3\2\2\2\u01c5\u01c3\3\2\2\2\u01c5\u01c6\3\2\2\2\u01c6") + buf.write("S\3\2\2\2\u01c7\u01c5\3\2\2\2\u01c8\u01c9\5V,\2\u01c9") + buf.write("\u01ca\7#\2\2\u01ca\u01cb\5X-\2\u01cb\u01cc\7$\2\2\u01cc") + buf.write("\u01cf\3\2\2\2\u01cd\u01cf\5V,\2\u01ce\u01c8\3\2\2\2\u01ce") + 
buf.write("\u01cd\3\2\2\2\u01cfU\3\2\2\2\u01d0\u01d3\5~@\2\u01d1") + buf.write("\u01d3\7\36\2\2\u01d2\u01d0\3\2\2\2\u01d2\u01d1\3\2\2") + buf.write("\2\u01d3W\3\2\2\2\u01d4\u01d7\5~@\2\u01d5\u01d7\7\t\2") + buf.write("\2\u01d6\u01d4\3\2\2\2\u01d6\u01d5\3\2\2\2\u01d7Y\3\2") + buf.write("\2\2\u01d8\u01dd\5\\/\2\u01d9\u01da\7/\2\2\u01da\u01dc") + buf.write("\5\\/\2\u01db\u01d9\3\2\2\2\u01dc\u01df\3\2\2\2\u01dd") + buf.write("\u01db\3\2\2\2\u01dd\u01de\3\2\2\2\u01de[\3\2\2\2\u01df") + buf.write("\u01dd\3\2\2\2\u01e0\u01e2\5z>\2\u01e1\u01e0\3\2\2\2\u01e1") + buf.write("\u01e2\3\2\2\2\u01e2\u01e4\3\2\2\2\u01e3\u01e5\5^\60\2") + buf.write("\u01e4\u01e3\3\2\2\2\u01e5\u01e6\3\2\2\2\u01e6\u01e4\3") + buf.write("\2\2\2\u01e6\u01e7\3\2\2\2\u01e7\u01ea\3\2\2\2\u01e8\u01ea") + buf.write("\3\2\2\2\u01e9\u01e1\3\2\2\2\u01e9\u01e8\3\2\2\2\u01ea") + buf.write("]\3\2\2\2\u01eb\u01ee\5`\61\2\u01ec\u01ef\5f\64\2\u01ed") + buf.write("\u01ef\3\2\2\2\u01ee\u01ec\3\2\2\2\u01ee\u01ed\3\2\2\2") + buf.write("\u01ef\u01fb\3\2\2\2\u01f0\u01f3\5j\66\2\u01f1\u01f4\5") + buf.write("f\64\2\u01f2\u01f4\3\2\2\2\u01f3\u01f1\3\2\2\2\u01f3\u01f2") + buf.write("\3\2\2\2\u01f4\u01fb\3\2\2\2\u01f5\u01fb\5b\62\2\u01f6") + buf.write("\u01f8\5\34\17\2\u01f7\u01f9\7+\2\2\u01f8\u01f7\3\2\2") + buf.write("\2\u01f8\u01f9\3\2\2\2\u01f9\u01fb\3\2\2\2\u01fa\u01eb") + buf.write("\3\2\2\2\u01fa\u01f0\3\2\2\2\u01fa\u01f5\3\2\2\2\u01fa") + buf.write("\u01f6\3\2\2\2\u01fb_\3\2\2\2\u01fc\u01fd\5~@\2\u01fd") + buf.write("\u0200\t\3\2\2\u01fe\u0201\5j\66\2\u01ff\u0201\5r:\2\u0200") + buf.write("\u01fe\3\2\2\2\u0200\u01ff\3\2\2\2\u0201a\3\2\2\2\u0202") + buf.write("\u0204\5r:\2\u0203\u0205\5d\63\2\u0204\u0203\3\2\2\2\u0204") + buf.write("\u0205\3\2\2\2\u0205c\3\2\2\2\u0206\u0207\5f\64\2\u0207") + buf.write("e\3\2\2\2\u0208\u020a\7+\2\2\u0209\u020b\7+\2\2\u020a") + buf.write("\u0209\3\2\2\2\u020a\u020b\3\2\2\2\u020b\u0215\3\2\2\2") + buf.write("\u020c\u020e\7,\2\2\u020d\u020f\7+\2\2\u020e\u020d\3\2") + 
buf.write("\2\2\u020e\u020f\3\2\2\2\u020f\u0215\3\2\2\2\u0210\u0212") + buf.write("\7.\2\2\u0211\u0213\7+\2\2\u0212\u0211\3\2\2\2\u0212\u0213") + buf.write("\3\2\2\2\u0213\u0215\3\2\2\2\u0214\u0208\3\2\2\2\u0214") + buf.write("\u020c\3\2\2\2\u0214\u0210\3\2\2\2\u0215g\3\2\2\2\u0216") + buf.write("\u021f\5v<\2\u0217\u021f\5x=\2\u0218\u021f\5l\67\2\u0219") + buf.write("\u021f\7\5\2\2\u021a\u021c\7\62\2\2\u021b\u021d\5z>\2") + buf.write("\u021c\u021b\3\2\2\2\u021c\u021d\3\2\2\2\u021d\u021f\3") + buf.write("\2\2\2\u021e\u0216\3\2\2\2\u021e\u0217\3\2\2\2\u021e\u0218") + buf.write("\3\2\2\2\u021e\u0219\3\2\2\2\u021e\u021a\3\2\2\2\u021f") + buf.write("i\3\2\2\2\u0220\u0229\5v<\2\u0221\u0229\5x=\2\u0222\u0229") + buf.write("\5t;\2\u0223\u0229\5l\67\2\u0224\u0226\7\62\2\2\u0225") + buf.write("\u0227\5z>\2\u0226\u0225\3\2\2\2\u0226\u0227\3\2\2\2\u0227") + buf.write("\u0229\3\2\2\2\u0228\u0220\3\2\2\2\u0228\u0221\3\2\2\2") + buf.write("\u0228\u0222\3\2\2\2\u0228\u0223\3\2\2\2\u0228\u0224\3") + buf.write("\2\2\2\u0229k\3\2\2\2\u022a\u022b\7\65\2\2\u022b\u022f") + buf.write("\5p9\2\u022c\u022d\7\65\2\2\u022d\u022f\5n8\2\u022e\u022a") + buf.write("\3\2\2\2\u022e\u022c\3\2\2\2\u022fm\3\2\2\2\u0230\u0231") + buf.write("\7#\2\2\u0231\u0236\5p9\2\u0232\u0233\7/\2\2\u0233\u0235") + buf.write("\5p9\2\u0234\u0232\3\2\2\2\u0235\u0238\3\2\2\2\u0236\u0234") + buf.write("\3\2\2\2\u0236\u0237\3\2\2\2\u0237\u0239\3\2\2\2\u0238") + buf.write("\u0236\3\2\2\2\u0239\u023a\7$\2\2\u023ao\3\2\2\2\u023b") + buf.write("\u023d\7\3\2\2\u023c\u023e\5z>\2\u023d\u023c\3\2\2\2\u023d") + buf.write("\u023e\3\2\2\2\u023e\u0246\3\2\2\2\u023f\u0241\7\n\2\2") + buf.write("\u0240\u0242\5z>\2\u0241\u0240\3\2\2\2\u0241\u0242\3\2") + buf.write("\2\2\u0242\u0246\3\2\2\2\u0243\u0246\5v<\2\u0244\u0246") + buf.write("\7\5\2\2\u0245\u023b\3\2\2\2\u0245\u023f\3\2\2\2\u0245") + buf.write("\u0243\3\2\2\2\u0245\u0244\3\2\2\2\u0246q\3\2\2\2\u0247") + buf.write("\u0252\7#\2\2\u0248\u024a\5\b\5\2\u0249\u0248\3\2\2\2") + 
buf.write("\u0249\u024a\3\2\2\2\u024a\u024e\3\2\2\2\u024b\u024d\5") + buf.write("\66\34\2\u024c\u024b\3\2\2\2\u024d\u0250\3\2\2\2\u024e") + buf.write("\u024c\3\2\2\2\u024e\u024f\3\2\2\2\u024f\u0251\3\2\2\2") + buf.write("\u0250\u024e\3\2\2\2\u0251\u0253\7\37\2\2\u0252\u0249") + buf.write("\3\2\2\2\u0252\u0253\3\2\2\2\u0253\u0254\3\2\2\2\u0254") + buf.write("\u0255\5Z.\2\u0255\u0256\7$\2\2\u0256s\3\2\2\2\u0257\u0259") + buf.write("\7\4\2\2\u0258\u025a\5\36\20\2\u0259\u0258\3\2\2\2\u0259") + buf.write("\u025a\3\2\2\2\u025a\u025c\3\2\2\2\u025b\u025d\5z>\2\u025c") + buf.write("\u025b\3\2\2\2\u025c\u025d\3\2\2\2\u025du\3\2\2\2\u025e") + buf.write("\u025f\7\n\2\2\u025f\u0260\7\61\2\2\u0260\u0261\7\n\2") + buf.write("\2\u0261w\3\2\2\2\u0262\u0264\7\3\2\2\u0263\u0265\5z>") + buf.write("\2\u0264\u0263\3\2\2\2\u0264\u0265\3\2\2\2\u0265\u026b") + buf.write("\3\2\2\2\u0266\u0268\7\n\2\2\u0267\u0269\5z>\2\u0268\u0267") + buf.write("\3\2\2\2\u0268\u0269\3\2\2\2\u0269\u026b\3\2\2\2\u026a") + buf.write("\u0262\3\2\2\2\u026a\u0266\3\2\2\2\u026by\3\2\2\2\u026c") + buf.write("\u026d\7(\2\2\u026d\u0272\5|?\2\u026e\u026f\7!\2\2\u026f") + buf.write("\u0271\5|?\2\u0270\u026e\3\2\2\2\u0271\u0274\3\2\2\2\u0272") + buf.write("\u0270\3\2\2\2\u0272\u0273\3\2\2\2\u0273\u0275\3\2\2\2") + buf.write("\u0274\u0272\3\2\2\2\u0275\u0276\7)\2\2\u0276{\3\2\2\2") + buf.write("\u0277\u027f\5~@\2\u0278\u0279\5~@\2\u0279\u027c\7*\2") + buf.write("\2\u027a\u027d\5~@\2\u027b\u027d\7\n\2\2\u027c\u027a\3") + buf.write("\2\2\2\u027c\u027b\3\2\2\2\u027d\u027f\3\2\2\2\u027e\u0277") + buf.write("\3\2\2\2\u027e\u0278\3\2\2\2\u027f}\3\2\2\2\u0280\u0281") + buf.write("\t\4\2\2\u0281\177\3\2\2\2W\u0083\u008c\u0093\u009d\u00a4") + buf.write("\u00ad\u00bb\u00c1\u00c9\u00d3\u00d8\u00df\u00e8\u00ec") + buf.write("\u00f2\u00fa\u0100\u0109\u0114\u011a\u011f\u0124\u0128") + buf.write("\u012c\u012f\u0132\u0135\u013a\u0145\u0149\u0154\u015f") + buf.write("\u016c\u0177\u017d\u0182\u0186\u0194\u0199\u019c\u01a1") + 
buf.write("\u01a5\u01a9\u01ad\u01b1\u01b3\u01b9\u01c5\u01ce\u01d2") + buf.write("\u01d6\u01dd\u01e1\u01e6\u01e9\u01ee\u01f3\u01f8\u01fa") + buf.write("\u0200\u0204\u020a\u020e\u0212\u0214\u021c\u021e\u0226") + buf.write("\u0228\u022e\u0236\u023d\u0241\u0245\u0249\u024e\u0252") + buf.write("\u0259\u025c\u0264\u0268\u026a\u0272\u027c\u027e") + return buf.getvalue() + + +class ANTLRv4Parser ( Parser ): + + grammarFileName = "java-escape" + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + sharedContextCache = PredictionContextCache() + + literalNames = [ u"", u"", u"", u"", + u"", u"", u"", u"", + u"", u"", u"", u"", + u"'options'", u"'tokens'", u"'channels'", u"'import'", + u"'fragment'", u"'lexer'", u"'parser'", u"'grammar'", + u"'protected'", u"'public'", u"'private'", u"'returns'", + u"'locals'", u"'throws'", u"'catch'", u"'finally'", + u"'mode'" ] + + symbolicNames = [ u"", u"TOKEN_REF", u"RULE_REF", u"LEXER_CHAR_SET", + u"DOC_COMMENT", u"BLOCK_COMMENT", u"LINE_COMMENT", + u"INT", u"STRING_LITERAL", u"UNTERMINATED_STRING_LITERAL", + u"BEGIN_ARGUMENT", u"BEGIN_ACTION", u"OPTIONS", u"TOKENS", + u"CHANNELS", u"IMPORT", u"FRAGMENT", u"LEXER", u"PARSER", + u"GRAMMAR", u"PROTECTED", u"PUBLIC", u"PRIVATE", u"RETURNS", + u"LOCALS", u"THROWS", u"CATCH", u"FINALLY", u"MODE", + u"COLON", u"COLONCOLON", u"COMMA", u"SEMI", u"LPAREN", + u"RPAREN", u"LBRACE", u"RBRACE", u"RARROW", u"LT", + u"GT", u"ASSIGN", u"QUESTION", u"STAR", u"PLUS_ASSIGN", + u"PLUS", u"OR", u"DOLLAR", u"RANGE", u"DOT", u"AT", + u"POUND", u"NOT", u"ID", u"WS", u"ERRCHAR", u"END_ARGUMENT", + u"UNTERMINATED_ARGUMENT", u"ARGUMENT_CONTENT", u"END_ACTION", + u"UNTERMINATED_ACTION", u"ACTION_CONTENT", u"UNTERMINATED_CHAR_SET" ] + + RULE_grammarSpec = 0 + RULE_grammarType = 1 + RULE_prequelConstruct = 2 + RULE_optionsSpec = 3 + RULE_option = 4 + RULE_optionValue = 5 + RULE_delegateGrammars = 6 + RULE_delegateGrammar = 7 + 
RULE_tokensSpec = 8 + RULE_channelsSpec = 9 + RULE_idList = 10 + RULE_action = 11 + RULE_actionScopeName = 12 + RULE_actionBlock = 13 + RULE_argActionBlock = 14 + RULE_modeSpec = 15 + RULE_rules = 16 + RULE_ruleSpec = 17 + RULE_parserRuleSpec = 18 + RULE_exceptionGroup = 19 + RULE_exceptionHandler = 20 + RULE_finallyClause = 21 + RULE_rulePrequel = 22 + RULE_ruleReturns = 23 + RULE_throwsSpec = 24 + RULE_localsSpec = 25 + RULE_ruleAction = 26 + RULE_ruleModifiers = 27 + RULE_ruleModifier = 28 + RULE_ruleBlock = 29 + RULE_ruleAltList = 30 + RULE_labeledAlt = 31 + RULE_lexerRuleSpec = 32 + RULE_lexerRuleBlock = 33 + RULE_lexerAltList = 34 + RULE_lexerAlt = 35 + RULE_lexerElements = 36 + RULE_lexerElement = 37 + RULE_labeledLexerElement = 38 + RULE_lexerBlock = 39 + RULE_lexerCommands = 40 + RULE_lexerCommand = 41 + RULE_lexerCommandName = 42 + RULE_lexerCommandExpr = 43 + RULE_altList = 44 + RULE_alternative = 45 + RULE_element = 46 + RULE_labeledElement = 47 + RULE_ebnf = 48 + RULE_blockSuffix = 49 + RULE_ebnfSuffix = 50 + RULE_lexerAtom = 51 + RULE_atom = 52 + RULE_notSet = 53 + RULE_blockSet = 54 + RULE_setElement = 55 + RULE_block = 56 + RULE_ruleref = 57 + RULE_characterRange = 58 + RULE_terminal = 59 + RULE_elementOptions = 60 + RULE_elementOption = 61 + RULE_identifier = 62 + + ruleNames = [ "grammarSpec", "grammarType", "prequelConstruct", "optionsSpec", + "option", "optionValue", "delegateGrammars", "delegateGrammar", + "tokensSpec", "channelsSpec", "idList", "action", "actionScopeName", + "actionBlock", "argActionBlock", "modeSpec", "rules", + "ruleSpec", "parserRuleSpec", "exceptionGroup", "exceptionHandler", + "finallyClause", "rulePrequel", "ruleReturns", "throwsSpec", + "localsSpec", "ruleAction", "ruleModifiers", "ruleModifier", + "ruleBlock", "ruleAltList", "labeledAlt", "lexerRuleSpec", + "lexerRuleBlock", "lexerAltList", "lexerAlt", "lexerElements", + "lexerElement", "labeledLexerElement", "lexerBlock", + "lexerCommands", "lexerCommand", 
"lexerCommandName", + "lexerCommandExpr", "altList", "alternative", "element", + "labeledElement", "ebnf", "blockSuffix", "ebnfSuffix", + "lexerAtom", "atom", "notSet", "blockSet", "setElement", + "block", "ruleref", "characterRange", "terminal", "elementOptions", + "elementOption", "identifier" ] + + EOF = Token.EOF + TOKEN_REF=1 + RULE_REF=2 + LEXER_CHAR_SET=3 + DOC_COMMENT=4 + BLOCK_COMMENT=5 + LINE_COMMENT=6 + INT=7 + STRING_LITERAL=8 + UNTERMINATED_STRING_LITERAL=9 + BEGIN_ARGUMENT=10 + BEGIN_ACTION=11 + OPTIONS=12 + TOKENS=13 + CHANNELS=14 + IMPORT=15 + FRAGMENT=16 + LEXER=17 + PARSER=18 + GRAMMAR=19 + PROTECTED=20 + PUBLIC=21 + PRIVATE=22 + RETURNS=23 + LOCALS=24 + THROWS=25 + CATCH=26 + FINALLY=27 + MODE=28 + COLON=29 + COLONCOLON=30 + COMMA=31 + SEMI=32 + LPAREN=33 + RPAREN=34 + LBRACE=35 + RBRACE=36 + RARROW=37 + LT=38 + GT=39 + ASSIGN=40 + QUESTION=41 + STAR=42 + PLUS_ASSIGN=43 + PLUS=44 + OR=45 + DOLLAR=46 + RANGE=47 + DOT=48 + AT=49 + POUND=50 + NOT=51 + ID=52 + WS=53 + ERRCHAR=54 + END_ARGUMENT=55 + UNTERMINATED_ARGUMENT=56 + ARGUMENT_CONTENT=57 + END_ACTION=58 + UNTERMINATED_ACTION=59 + ACTION_CONTENT=60 + UNTERMINATED_CHAR_SET=61 + + def __init__(self, input:TokenStream): + super().__init__(input) + self.checkVersion("4.5") + self._interp = ParserATNSimulator(self, self.atn, self.decisionsToDFA, self.sharedContextCache) + self._predicates = None + + + + class GrammarSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def grammarType(self): + return self.getTypedRuleContext(ANTLRv4Parser.GrammarTypeContext,0) + + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def SEMI(self): + return self.getToken(ANTLRv4Parser.SEMI, 0) + + def rules(self): + return self.getTypedRuleContext(ANTLRv4Parser.RulesContext,0) + + + def EOF(self): + return self.getToken(ANTLRv4Parser.EOF, 0) 
+ + def DOC_COMMENT(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.DOC_COMMENT) + else: + return self.getToken(ANTLRv4Parser.DOC_COMMENT, i) + + def prequelConstruct(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.PrequelConstructContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.PrequelConstructContext,i) + + + def modeSpec(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.ModeSpecContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.ModeSpecContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_grammarSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterGrammarSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitGrammarSpec(self) + + + + + def grammarSpec(self): + + localctx = ANTLRv4Parser.GrammarSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 0, self.RULE_grammarSpec) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 129 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.DOC_COMMENT: + self.state = 126 + self.match(ANTLRv4Parser.DOC_COMMENT) + self.state = 131 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 132 + self.grammarType() + self.state = 133 + self.identifier() + self.state = 134 + self.match(ANTLRv4Parser.SEMI) + self.state = 138 + self._errHandler.sync(self) + _la = self._input.LA(1) + while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.TOKENS) | (1 << ANTLRv4Parser.CHANNELS) | (1 << ANTLRv4Parser.IMPORT) | (1 << ANTLRv4Parser.AT))) != 0): + self.state = 135 + self.prequelConstruct() + self.state = 140 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 141 + self.rules() + self.state = 145 + 
self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.MODE: + self.state = 142 + self.modeSpec() + self.state = 147 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 148 + self.match(ANTLRv4Parser.EOF) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class GrammarTypeContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LEXER(self): + return self.getToken(ANTLRv4Parser.LEXER, 0) + + def GRAMMAR(self): + return self.getToken(ANTLRv4Parser.GRAMMAR, 0) + + def PARSER(self): + return self.getToken(ANTLRv4Parser.PARSER, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_grammarType + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterGrammarType(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitGrammarType(self) + + + + + def grammarType(self): + + localctx = ANTLRv4Parser.GrammarTypeContext(self, self._ctx, self.state) + self.enterRule(localctx, 2, self.RULE_grammarType) + try: + self.enterOuterAlt(localctx, 1) + self.state = 155 + token = self._input.LA(1) + if token in [ANTLRv4Parser.LEXER]: + self.state = 150 + self.match(ANTLRv4Parser.LEXER) + self.state = 151 + self.match(ANTLRv4Parser.GRAMMAR) + + elif token in [ANTLRv4Parser.PARSER]: + self.state = 152 + self.match(ANTLRv4Parser.PARSER) + self.state = 153 + self.match(ANTLRv4Parser.GRAMMAR) + + elif token in [ANTLRv4Parser.GRAMMAR]: + self.state = 154 + self.match(ANTLRv4Parser.GRAMMAR) + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + 
self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class PrequelConstructContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def optionsSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0) + + + def delegateGrammars(self): + return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarsContext,0) + + + def tokensSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.TokensSpecContext,0) + + + def channelsSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.ChannelsSpecContext,0) + + + def action(self): + return self.getTypedRuleContext(ANTLRv4Parser.ActionContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_prequelConstruct + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterPrequelConstruct(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitPrequelConstruct(self) + + + + + def prequelConstruct(self): + + localctx = ANTLRv4Parser.PrequelConstructContext(self, self._ctx, self.state) + self.enterRule(localctx, 4, self.RULE_prequelConstruct) + try: + self.state = 162 + token = self._input.LA(1) + if token in [ANTLRv4Parser.OPTIONS]: + self.enterOuterAlt(localctx, 1) + self.state = 157 + self.optionsSpec() + + elif token in [ANTLRv4Parser.IMPORT]: + self.enterOuterAlt(localctx, 2) + self.state = 158 + self.delegateGrammars() + + elif token in [ANTLRv4Parser.TOKENS]: + self.enterOuterAlt(localctx, 3) + self.state = 159 + self.tokensSpec() + + elif token in [ANTLRv4Parser.CHANNELS]: + self.enterOuterAlt(localctx, 4) + self.state = 160 + self.channelsSpec() + + elif token in [ANTLRv4Parser.AT]: + self.enterOuterAlt(localctx, 5) + self.state = 161 + self.action() + + else: + raise NoViableAltException(self) + + 
except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class OptionsSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def OPTIONS(self): + return self.getToken(ANTLRv4Parser.OPTIONS, 0) + + def LBRACE(self): + return self.getToken(ANTLRv4Parser.LBRACE, 0) + + def RBRACE(self): + return self.getToken(ANTLRv4Parser.RBRACE, 0) + + def option(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.OptionContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.OptionContext,i) + + + def SEMI(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.SEMI) + else: + return self.getToken(ANTLRv4Parser.SEMI, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_optionsSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterOptionsSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitOptionsSpec(self) + + + + + def optionsSpec(self): + + localctx = ANTLRv4Parser.OptionsSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 6, self.RULE_optionsSpec) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 164 + self.match(ANTLRv4Parser.OPTIONS) + self.state = 165 + self.match(ANTLRv4Parser.LBRACE) + self.state = 171 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF: + self.state = 166 + self.option() + self.state = 167 + self.match(ANTLRv4Parser.SEMI) + self.state = 173 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 174 + self.match(ANTLRv4Parser.RBRACE) + except 
RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class OptionContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def ASSIGN(self): + return self.getToken(ANTLRv4Parser.ASSIGN, 0) + + def optionValue(self): + return self.getTypedRuleContext(ANTLRv4Parser.OptionValueContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_option + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterOption(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitOption(self) + + + + + def option(self): + + localctx = ANTLRv4Parser.OptionContext(self, self._ctx, self.state) + self.enterRule(localctx, 8, self.RULE_option) + try: + self.enterOuterAlt(localctx, 1) + self.state = 176 + self.identifier() + self.state = 177 + self.match(ANTLRv4Parser.ASSIGN) + self.state = 178 + self.optionValue() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class OptionValueContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i) + + + def DOT(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.DOT) + else: + return 
self.getToken(ANTLRv4Parser.DOT, i) + + def STRING_LITERAL(self): + return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0) + + def actionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0) + + + def INT(self): + return self.getToken(ANTLRv4Parser.INT, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_optionValue + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterOptionValue(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitOptionValue(self) + + + + + def optionValue(self): + + localctx = ANTLRv4Parser.OptionValueContext(self, self._ctx, self.state) + self.enterRule(localctx, 10, self.RULE_optionValue) + self._la = 0 # Token type + try: + self.state = 191 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]: + self.enterOuterAlt(localctx, 1) + self.state = 180 + self.identifier() + self.state = 185 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.DOT: + self.state = 181 + self.match(ANTLRv4Parser.DOT) + self.state = 182 + self.identifier() + self.state = 187 + self._errHandler.sync(self) + _la = self._input.LA(1) + + + elif token in [ANTLRv4Parser.STRING_LITERAL]: + self.enterOuterAlt(localctx, 2) + self.state = 188 + self.match(ANTLRv4Parser.STRING_LITERAL) + + elif token in [ANTLRv4Parser.BEGIN_ACTION]: + self.enterOuterAlt(localctx, 3) + self.state = 189 + self.actionBlock() + + elif token in [ANTLRv4Parser.INT]: + self.enterOuterAlt(localctx, 4) + self.state = 190 + self.match(ANTLRv4Parser.INT) + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class DelegateGrammarsContext(ParserRuleContext): + + def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def IMPORT(self): + return self.getToken(ANTLRv4Parser.IMPORT, 0) + + def delegateGrammar(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.DelegateGrammarContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.DelegateGrammarContext,i) + + + def SEMI(self): + return self.getToken(ANTLRv4Parser.SEMI, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.COMMA) + else: + return self.getToken(ANTLRv4Parser.COMMA, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_delegateGrammars + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterDelegateGrammars(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitDelegateGrammars(self) + + + + + def delegateGrammars(self): + + localctx = ANTLRv4Parser.DelegateGrammarsContext(self, self._ctx, self.state) + self.enterRule(localctx, 12, self.RULE_delegateGrammars) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 193 + self.match(ANTLRv4Parser.IMPORT) + self.state = 194 + self.delegateGrammar() + self.state = 199 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.COMMA: + self.state = 195 + self.match(ANTLRv4Parser.COMMA) + self.state = 196 + self.delegateGrammar() + self.state = 201 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 202 + self.match(ANTLRv4Parser.SEMI) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class DelegateGrammarContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + 
super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i) + + + def ASSIGN(self): + return self.getToken(ANTLRv4Parser.ASSIGN, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_delegateGrammar + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterDelegateGrammar(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitDelegateGrammar(self) + + + + + def delegateGrammar(self): + + localctx = ANTLRv4Parser.DelegateGrammarContext(self, self._ctx, self.state) + self.enterRule(localctx, 14, self.RULE_delegateGrammar) + try: + self.state = 209 + la_ = self._interp.adaptivePredict(self._input,9,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 204 + self.identifier() + self.state = 205 + self.match(ANTLRv4Parser.ASSIGN) + self.state = 206 + self.identifier() + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 208 + self.identifier() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class TokensSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def TOKENS(self): + return self.getToken(ANTLRv4Parser.TOKENS, 0) + + def LBRACE(self): + return self.getToken(ANTLRv4Parser.LBRACE, 0) + + def RBRACE(self): + return self.getToken(ANTLRv4Parser.RBRACE, 0) + + def idList(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_tokensSpec + 
+ def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterTokensSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitTokensSpec(self) + + + + + def tokensSpec(self): + + localctx = ANTLRv4Parser.TokensSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 16, self.RULE_tokensSpec) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 211 + self.match(ANTLRv4Parser.TOKENS) + self.state = 212 + self.match(ANTLRv4Parser.LBRACE) + self.state = 214 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF: + self.state = 213 + self.idList() + + + self.state = 216 + self.match(ANTLRv4Parser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ChannelsSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def CHANNELS(self): + return self.getToken(ANTLRv4Parser.CHANNELS, 0) + + def LBRACE(self): + return self.getToken(ANTLRv4Parser.LBRACE, 0) + + def RBRACE(self): + return self.getToken(ANTLRv4Parser.RBRACE, 0) + + def idList(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdListContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_channelsSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterChannelsSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitChannelsSpec(self) + + + + + def channelsSpec(self): + + localctx = ANTLRv4Parser.ChannelsSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 18, 
self.RULE_channelsSpec) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 218 + self.match(ANTLRv4Parser.CHANNELS) + self.state = 219 + self.match(ANTLRv4Parser.LBRACE) + self.state = 221 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF: + self.state = 220 + self.idList() + + + self.state = 223 + self.match(ANTLRv4Parser.RBRACE) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class IdListContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i) + + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.COMMA) + else: + return self.getToken(ANTLRv4Parser.COMMA, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_idList + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterIdList(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitIdList(self) + + + + + def idList(self): + + localctx = ANTLRv4Parser.IdListContext(self, self._ctx, self.state) + self.enterRule(localctx, 20, self.RULE_idList) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 225 + self.identifier() + self.state = 230 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input,12,self._ctx) + while _alt!=2 and _alt!=ATN.INVALID_ALT_NUMBER: + if _alt==1: + self.state = 226 + self.match(ANTLRv4Parser.COMMA) + self.state = 227 + self.identifier() + 
self.state = 232 + self._errHandler.sync(self) + _alt = self._interp.adaptivePredict(self._input,12,self._ctx) + + self.state = 234 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.COMMA: + self.state = 233 + self.match(ANTLRv4Parser.COMMA) + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ActionContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def AT(self): + return self.getToken(ANTLRv4Parser.AT, 0) + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def actionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0) + + + def actionScopeName(self): + return self.getTypedRuleContext(ANTLRv4Parser.ActionScopeNameContext,0) + + + def COLONCOLON(self): + return self.getToken(ANTLRv4Parser.COLONCOLON, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_action + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterAction(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitAction(self) + + + + + def action(self): + + localctx = ANTLRv4Parser.ActionContext(self, self._ctx, self.state) + self.enterRule(localctx, 22, self.RULE_action) + try: + self.enterOuterAlt(localctx, 1) + self.state = 236 + self.match(ANTLRv4Parser.AT) + self.state = 240 + la_ = self._interp.adaptivePredict(self._input,14,self._ctx) + if la_ == 1: + self.state = 237 + self.actionScopeName() + self.state = 238 + self.match(ANTLRv4Parser.COLONCOLON) + + + self.state = 242 + self.identifier() + self.state = 243 + self.actionBlock() + except RecognitionException as re: + localctx.exception = re + 
self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ActionScopeNameContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def LEXER(self): + return self.getToken(ANTLRv4Parser.LEXER, 0) + + def PARSER(self): + return self.getToken(ANTLRv4Parser.PARSER, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_actionScopeName + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterActionScopeName(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitActionScopeName(self) + + + + + def actionScopeName(self): + + localctx = ANTLRv4Parser.ActionScopeNameContext(self, self._ctx, self.state) + self.enterRule(localctx, 24, self.RULE_actionScopeName) + try: + self.state = 248 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]: + self.enterOuterAlt(localctx, 1) + self.state = 245 + self.identifier() + + elif token in [ANTLRv4Parser.LEXER]: + self.enterOuterAlt(localctx, 2) + self.state = 246 + self.match(ANTLRv4Parser.LEXER) + + elif token in [ANTLRv4Parser.PARSER]: + self.enterOuterAlt(localctx, 3) + self.state = 247 + self.match(ANTLRv4Parser.PARSER) + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ActionBlockContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def 
BEGIN_ACTION(self): + return self.getToken(ANTLRv4Parser.BEGIN_ACTION, 0) + + def END_ACTION(self): + return self.getToken(ANTLRv4Parser.END_ACTION, 0) + + def ACTION_CONTENT(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.ACTION_CONTENT) + else: + return self.getToken(ANTLRv4Parser.ACTION_CONTENT, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_actionBlock + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterActionBlock(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitActionBlock(self) + + + + + def actionBlock(self): + + localctx = ANTLRv4Parser.ActionBlockContext(self, self._ctx, self.state) + self.enterRule(localctx, 26, self.RULE_actionBlock) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 250 + self.match(ANTLRv4Parser.BEGIN_ACTION) + self.state = 254 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.ACTION_CONTENT: + self.state = 251 + self.match(ANTLRv4Parser.ACTION_CONTENT) + self.state = 256 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 257 + self.match(ANTLRv4Parser.END_ACTION) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ArgActionBlockContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def BEGIN_ARGUMENT(self): + return self.getToken(ANTLRv4Parser.BEGIN_ARGUMENT, 0) + + def END_ARGUMENT(self): + return self.getToken(ANTLRv4Parser.END_ARGUMENT, 0) + + def ARGUMENT_CONTENT(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.ARGUMENT_CONTENT) + else: + return 
self.getToken(ANTLRv4Parser.ARGUMENT_CONTENT, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_argActionBlock + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterArgActionBlock(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitArgActionBlock(self) + + + + + def argActionBlock(self): + + localctx = ANTLRv4Parser.ArgActionBlockContext(self, self._ctx, self.state) + self.enterRule(localctx, 28, self.RULE_argActionBlock) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 259 + self.match(ANTLRv4Parser.BEGIN_ARGUMENT) + self.state = 263 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.ARGUMENT_CONTENT: + self.state = 260 + self.match(ANTLRv4Parser.ARGUMENT_CONTENT) + self.state = 265 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 266 + self.match(ANTLRv4Parser.END_ARGUMENT) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ModeSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def MODE(self): + return self.getToken(ANTLRv4Parser.MODE, 0) + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def SEMI(self): + return self.getToken(ANTLRv4Parser.SEMI, 0) + + def lexerRuleSpec(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.LexerRuleSpecContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_modeSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, 
ANTLRv4ParserListener ): + listener.enterModeSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitModeSpec(self) + + + + + def modeSpec(self): + + localctx = ANTLRv4Parser.ModeSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 30, self.RULE_modeSpec) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 268 + self.match(ANTLRv4Parser.MODE) + self.state = 269 + self.identifier() + self.state = 270 + self.match(ANTLRv4Parser.SEMI) + self.state = 274 + self._errHandler.sync(self) + _la = self._input.LA(1) + while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.FRAGMENT))) != 0): + self.state = 271 + self.lexerRuleSpec() + self.state = 276 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RulesContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ruleSpec(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.RuleSpecContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.RuleSpecContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_rules + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRules(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRules(self) + + + + + def rules(self): + + localctx = ANTLRv4Parser.RulesContext(self, self._ctx, self.state) + self.enterRule(localctx, 32, self.RULE_rules) + self._la = 0 # Token type + try: + 
self.enterOuterAlt(localctx, 1) + self.state = 280 + self._errHandler.sync(self) + _la = self._input.LA(1) + while (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.DOC_COMMENT) | (1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0): + self.state = 277 + self.ruleSpec() + self.state = 282 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RuleSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def parserRuleSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.ParserRuleSpecContext,0) + + + def lexerRuleSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleSpecContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleSpec(self) + + + + + def ruleSpec(self): + + localctx = ANTLRv4Parser.RuleSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 34, self.RULE_ruleSpec) + try: + self.state = 285 + la_ = self._interp.adaptivePredict(self._input,20,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 283 + self.parserRuleSpec() + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 284 + self.lexerRuleSpec() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + 
self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ParserRuleSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def RULE_REF(self): + return self.getToken(ANTLRv4Parser.RULE_REF, 0) + + def COLON(self): + return self.getToken(ANTLRv4Parser.COLON, 0) + + def ruleBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.RuleBlockContext,0) + + + def SEMI(self): + return self.getToken(ANTLRv4Parser.SEMI, 0) + + def exceptionGroup(self): + return self.getTypedRuleContext(ANTLRv4Parser.ExceptionGroupContext,0) + + + def DOC_COMMENT(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.DOC_COMMENT) + else: + return self.getToken(ANTLRv4Parser.DOC_COMMENT, i) + + def ruleModifiers(self): + return self.getTypedRuleContext(ANTLRv4Parser.RuleModifiersContext,0) + + + def argActionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0) + + + def ruleReturns(self): + return self.getTypedRuleContext(ANTLRv4Parser.RuleReturnsContext,0) + + + def throwsSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.ThrowsSpecContext,0) + + + def localsSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.LocalsSpecContext,0) + + + def rulePrequel(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.RulePrequelContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.RulePrequelContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_parserRuleSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterParserRuleSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitParserRuleSpec(self) + + + + + def parserRuleSpec(self): + + localctx = 
ANTLRv4Parser.ParserRuleSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 36, self.RULE_parserRuleSpec) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 290 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.DOC_COMMENT: + self.state = 287 + self.match(ANTLRv4Parser.DOC_COMMENT) + self.state = 292 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 294 + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0): + self.state = 293 + self.ruleModifiers() + + + self.state = 296 + self.match(ANTLRv4Parser.RULE_REF) + self.state = 298 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.BEGIN_ARGUMENT: + self.state = 297 + self.argActionBlock() + + + self.state = 301 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.RETURNS: + self.state = 300 + self.ruleReturns() + + + self.state = 304 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.THROWS: + self.state = 303 + self.throwsSpec() + + + self.state = 307 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LOCALS: + self.state = 306 + self.localsSpec() + + + self.state = 312 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.OPTIONS or _la==ANTLRv4Parser.AT: + self.state = 309 + self.rulePrequel() + self.state = 314 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 315 + self.match(ANTLRv4Parser.COLON) + self.state = 316 + self.ruleBlock() + self.state = 317 + self.match(ANTLRv4Parser.SEMI) + self.state = 318 + self.exceptionGroup() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ExceptionGroupContext(ParserRuleContext): + + def __init__(self, parser, 
parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def exceptionHandler(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.ExceptionHandlerContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.ExceptionHandlerContext,i) + + + def finallyClause(self): + return self.getTypedRuleContext(ANTLRv4Parser.FinallyClauseContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_exceptionGroup + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterExceptionGroup(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitExceptionGroup(self) + + + + + def exceptionGroup(self): + + localctx = ANTLRv4Parser.ExceptionGroupContext(self, self._ctx, self.state) + self.enterRule(localctx, 38, self.RULE_exceptionGroup) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 323 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.CATCH: + self.state = 320 + self.exceptionHandler() + self.state = 325 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 327 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.FINALLY: + self.state = 326 + self.finallyClause() + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ExceptionHandlerContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def CATCH(self): + return self.getToken(ANTLRv4Parser.CATCH, 0) + + def argActionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0) + + + def actionBlock(self): + return 
self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_exceptionHandler + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterExceptionHandler(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitExceptionHandler(self) + + + + + def exceptionHandler(self): + + localctx = ANTLRv4Parser.ExceptionHandlerContext(self, self._ctx, self.state) + self.enterRule(localctx, 40, self.RULE_exceptionHandler) + try: + self.enterOuterAlt(localctx, 1) + self.state = 329 + self.match(ANTLRv4Parser.CATCH) + self.state = 330 + self.argActionBlock() + self.state = 331 + self.actionBlock() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class FinallyClauseContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def FINALLY(self): + return self.getToken(ANTLRv4Parser.FINALLY, 0) + + def actionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_finallyClause + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterFinallyClause(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitFinallyClause(self) + + + + + def finallyClause(self): + + localctx = ANTLRv4Parser.FinallyClauseContext(self, self._ctx, self.state) + self.enterRule(localctx, 42, self.RULE_finallyClause) + try: + self.enterOuterAlt(localctx, 1) + self.state = 333 + self.match(ANTLRv4Parser.FINALLY) + self.state = 334 + self.actionBlock() + 
except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RulePrequelContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def optionsSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0) + + + def ruleAction(self): + return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_rulePrequel + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRulePrequel(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRulePrequel(self) + + + + + def rulePrequel(self): + + localctx = ANTLRv4Parser.RulePrequelContext(self, self._ctx, self.state) + self.enterRule(localctx, 44, self.RULE_rulePrequel) + try: + self.state = 338 + token = self._input.LA(1) + if token in [ANTLRv4Parser.OPTIONS]: + self.enterOuterAlt(localctx, 1) + self.state = 336 + self.optionsSpec() + + elif token in [ANTLRv4Parser.AT]: + self.enterOuterAlt(localctx, 2) + self.state = 337 + self.ruleAction() + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RuleReturnsContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def RETURNS(self): + return self.getToken(ANTLRv4Parser.RETURNS, 0) + + def argActionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0) + + + def 
getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleReturns + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleReturns(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleReturns(self) + + + + + def ruleReturns(self): + + localctx = ANTLRv4Parser.RuleReturnsContext(self, self._ctx, self.state) + self.enterRule(localctx, 46, self.RULE_ruleReturns) + try: + self.enterOuterAlt(localctx, 1) + self.state = 340 + self.match(ANTLRv4Parser.RETURNS) + self.state = 341 + self.argActionBlock() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ThrowsSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def THROWS(self): + return self.getToken(ANTLRv4Parser.THROWS, 0) + + def identifier(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i) + + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.COMMA) + else: + return self.getToken(ANTLRv4Parser.COMMA, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_throwsSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterThrowsSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitThrowsSpec(self) + + + + + def throwsSpec(self): + + localctx = ANTLRv4Parser.ThrowsSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 48, self.RULE_throwsSpec) + self._la = 0 # Token type + try: + 
self.enterOuterAlt(localctx, 1) + self.state = 343 + self.match(ANTLRv4Parser.THROWS) + self.state = 344 + self.identifier() + self.state = 349 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.COMMA: + self.state = 345 + self.match(ANTLRv4Parser.COMMA) + self.state = 346 + self.identifier() + self.state = 351 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LocalsSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LOCALS(self): + return self.getToken(ANTLRv4Parser.LOCALS, 0) + + def argActionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_localsSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLocalsSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLocalsSpec(self) + + + + + def localsSpec(self): + + localctx = ANTLRv4Parser.LocalsSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 50, self.RULE_localsSpec) + try: + self.enterOuterAlt(localctx, 1) + self.state = 352 + self.match(ANTLRv4Parser.LOCALS) + self.state = 353 + self.argActionBlock() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RuleActionContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def 
AT(self): + return self.getToken(ANTLRv4Parser.AT, 0) + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def actionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleAction + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleAction(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleAction(self) + + + + + def ruleAction(self): + + localctx = ANTLRv4Parser.RuleActionContext(self, self._ctx, self.state) + self.enterRule(localctx, 52, self.RULE_ruleAction) + try: + self.enterOuterAlt(localctx, 1) + self.state = 355 + self.match(ANTLRv4Parser.AT) + self.state = 356 + self.identifier() + self.state = 357 + self.actionBlock() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RuleModifiersContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ruleModifier(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.RuleModifierContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.RuleModifierContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleModifiers + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleModifiers(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleModifiers(self) + + + + + def ruleModifiers(self): + + localctx = ANTLRv4Parser.RuleModifiersContext(self, self._ctx, self.state) + 
self.enterRule(localctx, 54, self.RULE_ruleModifiers) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 360 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 359 + self.ruleModifier() + self.state = 362 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << ANTLRv4Parser.PRIVATE))) != 0)): + break + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RuleModifierContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def PUBLIC(self): + return self.getToken(ANTLRv4Parser.PUBLIC, 0) + + def PRIVATE(self): + return self.getToken(ANTLRv4Parser.PRIVATE, 0) + + def PROTECTED(self): + return self.getToken(ANTLRv4Parser.PROTECTED, 0) + + def FRAGMENT(self): + return self.getToken(ANTLRv4Parser.FRAGMENT, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleModifier + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleModifier(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleModifier(self) + + + + + def ruleModifier(self): + + localctx = ANTLRv4Parser.RuleModifierContext(self, self._ctx, self.state) + self.enterRule(localctx, 56, self.RULE_ruleModifier) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 364 + _la = self._input.LA(1) + if not((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.FRAGMENT) | (1 << ANTLRv4Parser.PROTECTED) | (1 << ANTLRv4Parser.PUBLIC) | (1 << 
ANTLRv4Parser.PRIVATE))) != 0)): + self._errHandler.recoverInline(self) + else: + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RuleBlockContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def ruleAltList(self): + return self.getTypedRuleContext(ANTLRv4Parser.RuleAltListContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleBlock + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleBlock(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleBlock(self) + + + + + def ruleBlock(self): + + localctx = ANTLRv4Parser.RuleBlockContext(self, self._ctx, self.state) + self.enterRule(localctx, 58, self.RULE_ruleBlock) + try: + self.enterOuterAlt(localctx, 1) + self.state = 366 + self.ruleAltList() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RuleAltListContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def labeledAlt(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.LabeledAltContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.LabeledAltContext,i) + + + def OR(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.OR) + else: + return self.getToken(ANTLRv4Parser.OR, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleAltList + + def enterRule(self, 
listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleAltList(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleAltList(self) + + + + + def ruleAltList(self): + + localctx = ANTLRv4Parser.RuleAltListContext(self, self._ctx, self.state) + self.enterRule(localctx, 60, self.RULE_ruleAltList) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 368 + self.labeledAlt() + self.state = 373 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.OR: + self.state = 369 + self.match(ANTLRv4Parser.OR) + self.state = 370 + self.labeledAlt() + self.state = 375 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LabeledAltContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def alternative(self): + return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,0) + + + def POUND(self): + return self.getToken(ANTLRv4Parser.POUND, 0) + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_labeledAlt + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLabeledAlt(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLabeledAlt(self) + + + + + def labeledAlt(self): + + localctx = ANTLRv4Parser.LabeledAltContext(self, self._ctx, self.state) + self.enterRule(localctx, 62, self.RULE_labeledAlt) + self._la = 0 # Token type + try: + 
self.enterOuterAlt(localctx, 1) + self.state = 376 + self.alternative() + self.state = 379 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.POUND: + self.state = 377 + self.match(ANTLRv4Parser.POUND) + self.state = 378 + self.identifier() + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerRuleSpecContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def TOKEN_REF(self): + return self.getToken(ANTLRv4Parser.TOKEN_REF, 0) + + def COLON(self): + return self.getToken(ANTLRv4Parser.COLON, 0) + + def lexerRuleBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerRuleBlockContext,0) + + + def SEMI(self): + return self.getToken(ANTLRv4Parser.SEMI, 0) + + def DOC_COMMENT(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.DOC_COMMENT) + else: + return self.getToken(ANTLRv4Parser.DOC_COMMENT, i) + + def FRAGMENT(self): + return self.getToken(ANTLRv4Parser.FRAGMENT, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerRuleSpec + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerRuleSpec(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerRuleSpec(self) + + + + + def lexerRuleSpec(self): + + localctx = ANTLRv4Parser.LexerRuleSpecContext(self, self._ctx, self.state) + self.enterRule(localctx, 64, self.RULE_lexerRuleSpec) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 384 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.DOC_COMMENT: + self.state = 381 + self.match(ANTLRv4Parser.DOC_COMMENT) + self.state = 386 + 
self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 388 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.FRAGMENT: + self.state = 387 + self.match(ANTLRv4Parser.FRAGMENT) + + + self.state = 390 + self.match(ANTLRv4Parser.TOKEN_REF) + self.state = 391 + self.match(ANTLRv4Parser.COLON) + self.state = 392 + self.lexerRuleBlock() + self.state = 393 + self.match(ANTLRv4Parser.SEMI) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerRuleBlockContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def lexerAltList(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerRuleBlock + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerRuleBlock(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerRuleBlock(self) + + + + + def lexerRuleBlock(self): + + localctx = ANTLRv4Parser.LexerRuleBlockContext(self, self._ctx, self.state) + self.enterRule(localctx, 66, self.RULE_lexerRuleBlock) + try: + self.enterOuterAlt(localctx, 1) + self.state = 395 + self.lexerAltList() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerAltListContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def lexerAlt(self, i:int=None): + if i is None: + return 
self.getTypedRuleContexts(ANTLRv4Parser.LexerAltContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.LexerAltContext,i) + + + def OR(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.OR) + else: + return self.getToken(ANTLRv4Parser.OR, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerAltList + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerAltList(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerAltList(self) + + + + + def lexerAltList(self): + + localctx = ANTLRv4Parser.LexerAltListContext(self, self._ctx, self.state) + self.enterRule(localctx, 68, self.RULE_lexerAltList) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 397 + self.lexerAlt() + self.state = 402 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.OR: + self.state = 398 + self.match(ANTLRv4Parser.OR) + self.state = 399 + self.lexerAlt() + self.state = 404 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerAltContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def lexerElements(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerElementsContext,0) + + + def lexerCommands(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandsContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerAlt + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerAlt(self) + + def exitRule(self, 
listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerAlt(self) + + + + + def lexerAlt(self): + + localctx = ANTLRv4Parser.LexerAltContext(self, self._ctx, self.state) + self.enterRule(localctx, 70, self.RULE_lexerAlt) + self._la = 0 # Token type + try: + self.state = 410 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]: + self.enterOuterAlt(localctx, 1) + self.state = 405 + self.lexerElements() + self.state = 407 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.RARROW: + self.state = 406 + self.lexerCommands() + + + + elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR]: + self.enterOuterAlt(localctx, 2) + + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerElementsContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def lexerElement(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.LexerElementContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.LexerElementContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerElements + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerElements(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerElements(self) + + + + + def lexerElements(self): + + localctx = ANTLRv4Parser.LexerElementsContext(self, self._ctx, self.state) + 
self.enterRule(localctx, 72, self.RULE_lexerElements) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 413 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 412 + self.lexerElement() + self.state = 415 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.LEXER_CHAR_SET) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)): + break + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerElementContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def labeledLexerElement(self): + return self.getTypedRuleContext(ANTLRv4Parser.LabeledLexerElementContext,0) + + + def ebnfSuffix(self): + return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0) + + + def lexerAtom(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0) + + + def lexerBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerBlockContext,0) + + + def actionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0) + + + def QUESTION(self): + return self.getToken(ANTLRv4Parser.QUESTION, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerElement + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerElement(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerElement(self) + + + + + def 
lexerElement(self): + + localctx = ANTLRv4Parser.LexerElementContext(self, self._ctx, self.state) + self.enterRule(localctx, 74, self.RULE_lexerElement) + self._la = 0 # Token type + try: + self.state = 433 + la_ = self._interp.adaptivePredict(self._input,45,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 417 + self.labeledLexerElement() + self.state = 419 + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0): + self.state = 418 + self.ebnfSuffix() + + + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 421 + self.lexerAtom() + self.state = 423 + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0): + self.state = 422 + self.ebnfSuffix() + + + pass + + elif la_ == 3: + self.enterOuterAlt(localctx, 3) + self.state = 425 + self.lexerBlock() + self.state = 427 + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0): + self.state = 426 + self.ebnfSuffix() + + + pass + + elif la_ == 4: + self.enterOuterAlt(localctx, 4) + self.state = 429 + self.actionBlock() + self.state = 431 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.QUESTION: + self.state = 430 + self.match(ANTLRv4Parser.QUESTION) + + + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LabeledLexerElementContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + 
def ASSIGN(self): + return self.getToken(ANTLRv4Parser.ASSIGN, 0) + + def PLUS_ASSIGN(self): + return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0) + + def lexerAtom(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerAtomContext,0) + + + def block(self): + return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_labeledLexerElement + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLabeledLexerElement(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLabeledLexerElement(self) + + + + + def labeledLexerElement(self): + + localctx = ANTLRv4Parser.LabeledLexerElementContext(self, self._ctx, self.state) + self.enterRule(localctx, 76, self.RULE_labeledLexerElement) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 435 + self.identifier() + self.state = 436 + _la = self._input.LA(1) + if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN): + self._errHandler.recoverInline(self) + else: + self.consume() + self.state = 439 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.LEXER_CHAR_SET, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]: + self.state = 437 + self.lexerAtom() + + elif token in [ANTLRv4Parser.LPAREN]: + self.state = 438 + self.block() + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerBlockContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LPAREN(self): + return self.getToken(ANTLRv4Parser.LPAREN, 0) + + def 
lexerAltList(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerAltListContext,0) + + + def RPAREN(self): + return self.getToken(ANTLRv4Parser.RPAREN, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerBlock + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerBlock(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerBlock(self) + + + + + def lexerBlock(self): + + localctx = ANTLRv4Parser.LexerBlockContext(self, self._ctx, self.state) + self.enterRule(localctx, 78, self.RULE_lexerBlock) + try: + self.enterOuterAlt(localctx, 1) + self.state = 441 + self.match(ANTLRv4Parser.LPAREN) + self.state = 442 + self.lexerAltList() + self.state = 443 + self.match(ANTLRv4Parser.RPAREN) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerCommandsContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def RARROW(self): + return self.getToken(ANTLRv4Parser.RARROW, 0) + + def lexerCommand(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.LexerCommandContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandContext,i) + + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.COMMA) + else: + return self.getToken(ANTLRv4Parser.COMMA, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerCommands + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerCommands(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + 
listener.exitLexerCommands(self) + + + + + def lexerCommands(self): + + localctx = ANTLRv4Parser.LexerCommandsContext(self, self._ctx, self.state) + self.enterRule(localctx, 80, self.RULE_lexerCommands) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 445 + self.match(ANTLRv4Parser.RARROW) + self.state = 446 + self.lexerCommand() + self.state = 451 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.COMMA: + self.state = 447 + self.match(ANTLRv4Parser.COMMA) + self.state = 448 + self.lexerCommand() + self.state = 453 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerCommandContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def lexerCommandName(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandNameContext,0) + + + def LPAREN(self): + return self.getToken(ANTLRv4Parser.LPAREN, 0) + + def lexerCommandExpr(self): + return self.getTypedRuleContext(ANTLRv4Parser.LexerCommandExprContext,0) + + + def RPAREN(self): + return self.getToken(ANTLRv4Parser.RPAREN, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerCommand + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerCommand(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerCommand(self) + + + + + def lexerCommand(self): + + localctx = ANTLRv4Parser.LexerCommandContext(self, self._ctx, self.state) + self.enterRule(localctx, 82, self.RULE_lexerCommand) + try: + self.state = 460 + la_ = self._interp.adaptivePredict(self._input,48,self._ctx) + 
if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 454 + self.lexerCommandName() + self.state = 455 + self.match(ANTLRv4Parser.LPAREN) + self.state = 456 + self.lexerCommandExpr() + self.state = 457 + self.match(ANTLRv4Parser.RPAREN) + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 459 + self.lexerCommandName() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerCommandNameContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def MODE(self): + return self.getToken(ANTLRv4Parser.MODE, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerCommandName + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerCommandName(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerCommandName(self) + + + + + def lexerCommandName(self): + + localctx = ANTLRv4Parser.LexerCommandNameContext(self, self._ctx, self.state) + self.enterRule(localctx, 84, self.RULE_lexerCommandName) + try: + self.state = 464 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]: + self.enterOuterAlt(localctx, 1) + self.state = 462 + self.identifier() + + elif token in [ANTLRv4Parser.MODE]: + self.enterOuterAlt(localctx, 2) + self.state = 463 + self.match(ANTLRv4Parser.MODE) + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return 
localctx + + class LexerCommandExprContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def INT(self): + return self.getToken(ANTLRv4Parser.INT, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerCommandExpr + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerCommandExpr(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerCommandExpr(self) + + + + + def lexerCommandExpr(self): + + localctx = ANTLRv4Parser.LexerCommandExprContext(self, self._ctx, self.state) + self.enterRule(localctx, 86, self.RULE_lexerCommandExpr) + try: + self.state = 468 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]: + self.enterOuterAlt(localctx, 1) + self.state = 466 + self.identifier() + + elif token in [ANTLRv4Parser.INT]: + self.enterOuterAlt(localctx, 2) + self.state = 467 + self.match(ANTLRv4Parser.INT) + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class AltListContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def alternative(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.AlternativeContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.AlternativeContext,i) + + + def OR(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.OR) + else: + return self.getToken(ANTLRv4Parser.OR, 
i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_altList + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterAltList(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitAltList(self) + + + + + def altList(self): + + localctx = ANTLRv4Parser.AltListContext(self, self._ctx, self.state) + self.enterRule(localctx, 88, self.RULE_altList) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 470 + self.alternative() + self.state = 475 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.OR: + self.state = 471 + self.match(ANTLRv4Parser.OR) + self.state = 472 + self.alternative() + self.state = 477 + self._errHandler.sync(self) + _la = self._input.LA(1) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class AlternativeContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def elementOptions(self): + return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0) + + + def element(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.ElementContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.ElementContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_alternative + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterAlternative(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitAlternative(self) + + + + + def alternative(self): + + localctx = ANTLRv4Parser.AlternativeContext(self, self._ctx, 
self.state) + self.enterRule(localctx, 90, self.RULE_alternative) + self._la = 0 # Token type + try: + self.state = 487 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.LPAREN, ANTLRv4Parser.LT, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]: + self.enterOuterAlt(localctx, 1) + self.state = 479 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 478 + self.elementOptions() + + + self.state = 482 + self._errHandler.sync(self) + _la = self._input.LA(1) + while True: + self.state = 481 + self.element() + self.state = 484 + self._errHandler.sync(self) + _la = self._input.LA(1) + if not ((((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.TOKEN_REF) | (1 << ANTLRv4Parser.RULE_REF) | (1 << ANTLRv4Parser.STRING_LITERAL) | (1 << ANTLRv4Parser.BEGIN_ACTION) | (1 << ANTLRv4Parser.LPAREN) | (1 << ANTLRv4Parser.DOT) | (1 << ANTLRv4Parser.NOT))) != 0)): + break + + + elif token in [ANTLRv4Parser.SEMI, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR, ANTLRv4Parser.POUND]: + self.enterOuterAlt(localctx, 2) + + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ElementContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def labeledElement(self): + return self.getTypedRuleContext(ANTLRv4Parser.LabeledElementContext,0) + + + def ebnfSuffix(self): + return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0) + + + def atom(self): + return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0) + + + def ebnf(self): + return self.getTypedRuleContext(ANTLRv4Parser.EbnfContext,0) + + + def actionBlock(self): + return 
self.getTypedRuleContext(ANTLRv4Parser.ActionBlockContext,0) + + + def QUESTION(self): + return self.getToken(ANTLRv4Parser.QUESTION, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_element + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterElement(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitElement(self) + + + + + def element(self): + + localctx = ANTLRv4Parser.ElementContext(self, self._ctx, self.state) + self.enterRule(localctx, 92, self.RULE_element) + self._la = 0 # Token type + try: + self.state = 504 + la_ = self._interp.adaptivePredict(self._input,58,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 489 + self.labeledElement() + self.state = 492 + token = self._input.LA(1) + if token in [ANTLRv4Parser.QUESTION, ANTLRv4Parser.STAR, ANTLRv4Parser.PLUS]: + self.state = 490 + self.ebnfSuffix() + + elif token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.SEMI, ANTLRv4Parser.LPAREN, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR, ANTLRv4Parser.DOT, ANTLRv4Parser.POUND, ANTLRv4Parser.NOT]: + pass + + else: + raise NoViableAltException(self) + + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 494 + self.atom() + self.state = 497 + token = self._input.LA(1) + if token in [ANTLRv4Parser.QUESTION, ANTLRv4Parser.STAR, ANTLRv4Parser.PLUS]: + self.state = 495 + self.ebnfSuffix() + + elif token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.BEGIN_ACTION, ANTLRv4Parser.SEMI, ANTLRv4Parser.LPAREN, ANTLRv4Parser.RPAREN, ANTLRv4Parser.OR, ANTLRv4Parser.DOT, ANTLRv4Parser.POUND, ANTLRv4Parser.NOT]: + pass + + else: + raise NoViableAltException(self) + + pass + + elif la_ == 3: + self.enterOuterAlt(localctx, 3) + self.state = 499 + self.ebnf() + pass 
+ + elif la_ == 4: + self.enterOuterAlt(localctx, 4) + self.state = 500 + self.actionBlock() + self.state = 502 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.QUESTION: + self.state = 501 + self.match(ANTLRv4Parser.QUESTION) + + + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LabeledElementContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def identifier(self): + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,0) + + + def ASSIGN(self): + return self.getToken(ANTLRv4Parser.ASSIGN, 0) + + def PLUS_ASSIGN(self): + return self.getToken(ANTLRv4Parser.PLUS_ASSIGN, 0) + + def atom(self): + return self.getTypedRuleContext(ANTLRv4Parser.AtomContext,0) + + + def block(self): + return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_labeledElement + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLabeledElement(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitLabeledElement(self) + + + + + def labeledElement(self): + + localctx = ANTLRv4Parser.LabeledElementContext(self, self._ctx, self.state) + self.enterRule(localctx, 94, self.RULE_labeledElement) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 506 + self.identifier() + self.state = 507 + _la = self._input.LA(1) + if not(_la==ANTLRv4Parser.ASSIGN or _la==ANTLRv4Parser.PLUS_ASSIGN): + self._errHandler.recoverInline(self) + else: + self.consume() + self.state = 510 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF, 
ANTLRv4Parser.STRING_LITERAL, ANTLRv4Parser.DOT, ANTLRv4Parser.NOT]: + self.state = 508 + self.atom() + + elif token in [ANTLRv4Parser.LPAREN]: + self.state = 509 + self.block() + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class EbnfContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def block(self): + return self.getTypedRuleContext(ANTLRv4Parser.BlockContext,0) + + + def blockSuffix(self): + return self.getTypedRuleContext(ANTLRv4Parser.BlockSuffixContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ebnf + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterEbnf(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitEbnf(self) + + + + + def ebnf(self): + + localctx = ANTLRv4Parser.EbnfContext(self, self._ctx, self.state) + self.enterRule(localctx, 96, self.RULE_ebnf) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 512 + self.block() + self.state = 514 + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.QUESTION) | (1 << ANTLRv4Parser.STAR) | (1 << ANTLRv4Parser.PLUS))) != 0): + self.state = 513 + self.blockSuffix() + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class BlockSuffixContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def 
ebnfSuffix(self): + return self.getTypedRuleContext(ANTLRv4Parser.EbnfSuffixContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_blockSuffix + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterBlockSuffix(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitBlockSuffix(self) + + + + + def blockSuffix(self): + + localctx = ANTLRv4Parser.BlockSuffixContext(self, self._ctx, self.state) + self.enterRule(localctx, 98, self.RULE_blockSuffix) + try: + self.enterOuterAlt(localctx, 1) + self.state = 516 + self.ebnfSuffix() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class EbnfSuffixContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def QUESTION(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.QUESTION) + else: + return self.getToken(ANTLRv4Parser.QUESTION, i) + + def STAR(self): + return self.getToken(ANTLRv4Parser.STAR, 0) + + def PLUS(self): + return self.getToken(ANTLRv4Parser.PLUS, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ebnfSuffix + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterEbnfSuffix(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitEbnfSuffix(self) + + + + + def ebnfSuffix(self): + + localctx = ANTLRv4Parser.EbnfSuffixContext(self, self._ctx, self.state) + self.enterRule(localctx, 100, self.RULE_ebnfSuffix) + self._la = 0 # Token type + try: + self.state = 530 + token = self._input.LA(1) + if token in [ANTLRv4Parser.QUESTION]: + 
self.enterOuterAlt(localctx, 1) + self.state = 518 + self.match(ANTLRv4Parser.QUESTION) + self.state = 520 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.QUESTION: + self.state = 519 + self.match(ANTLRv4Parser.QUESTION) + + + + elif token in [ANTLRv4Parser.STAR]: + self.enterOuterAlt(localctx, 2) + self.state = 522 + self.match(ANTLRv4Parser.STAR) + self.state = 524 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.QUESTION: + self.state = 523 + self.match(ANTLRv4Parser.QUESTION) + + + + elif token in [ANTLRv4Parser.PLUS]: + self.enterOuterAlt(localctx, 3) + self.state = 526 + self.match(ANTLRv4Parser.PLUS) + self.state = 528 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.QUESTION: + self.state = 527 + self.match(ANTLRv4Parser.QUESTION) + + + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class LexerAtomContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def characterRange(self): + return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0) + + + def terminal(self): + return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0) + + + def notSet(self): + return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0) + + + def LEXER_CHAR_SET(self): + return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0) + + def DOT(self): + return self.getToken(ANTLRv4Parser.DOT, 0) + + def elementOptions(self): + return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_lexerAtom + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterLexerAtom(self) + + def exitRule(self, listener:ParseTreeListener): + if 
isinstance( listener, ANTLRv4ParserListener ): + listener.exitLexerAtom(self) + + + + + def lexerAtom(self): + + localctx = ANTLRv4Parser.LexerAtomContext(self, self._ctx, self.state) + self.enterRule(localctx, 102, self.RULE_lexerAtom) + self._la = 0 # Token type + try: + self.state = 540 + la_ = self._interp.adaptivePredict(self._input,66,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 532 + self.characterRange() + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 533 + self.terminal() + pass + + elif la_ == 3: + self.enterOuterAlt(localctx, 3) + self.state = 534 + self.notSet() + pass + + elif la_ == 4: + self.enterOuterAlt(localctx, 4) + self.state = 535 + self.match(ANTLRv4Parser.LEXER_CHAR_SET) + pass + + elif la_ == 5: + self.enterOuterAlt(localctx, 5) + self.state = 536 + self.match(ANTLRv4Parser.DOT) + self.state = 538 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 537 + self.elementOptions() + + + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class AtomContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def characterRange(self): + return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0) + + + def terminal(self): + return self.getTypedRuleContext(ANTLRv4Parser.TerminalContext,0) + + + def ruleref(self): + return self.getTypedRuleContext(ANTLRv4Parser.RulerefContext,0) + + + def notSet(self): + return self.getTypedRuleContext(ANTLRv4Parser.NotSetContext,0) + + + def DOT(self): + return self.getToken(ANTLRv4Parser.DOT, 0) + + def elementOptions(self): + return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_atom + + def 
enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterAtom(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitAtom(self) + + + + + def atom(self): + + localctx = ANTLRv4Parser.AtomContext(self, self._ctx, self.state) + self.enterRule(localctx, 104, self.RULE_atom) + self._la = 0 # Token type + try: + self.state = 550 + la_ = self._interp.adaptivePredict(self._input,68,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 542 + self.characterRange() + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 543 + self.terminal() + pass + + elif la_ == 3: + self.enterOuterAlt(localctx, 3) + self.state = 544 + self.ruleref() + pass + + elif la_ == 4: + self.enterOuterAlt(localctx, 4) + self.state = 545 + self.notSet() + pass + + elif la_ == 5: + self.enterOuterAlt(localctx, 5) + self.state = 546 + self.match(ANTLRv4Parser.DOT) + self.state = 548 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 547 + self.elementOptions() + + + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class NotSetContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def NOT(self): + return self.getToken(ANTLRv4Parser.NOT, 0) + + def setElement(self): + return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,0) + + + def blockSet(self): + return self.getTypedRuleContext(ANTLRv4Parser.BlockSetContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_notSet + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterNotSet(self) + + def exitRule(self, 
listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitNotSet(self) + + + + + def notSet(self): + + localctx = ANTLRv4Parser.NotSetContext(self, self._ctx, self.state) + self.enterRule(localctx, 106, self.RULE_notSet) + try: + self.state = 556 + la_ = self._interp.adaptivePredict(self._input,69,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 552 + self.match(ANTLRv4Parser.NOT) + self.state = 553 + self.setElement() + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 554 + self.match(ANTLRv4Parser.NOT) + self.state = 555 + self.blockSet() + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class BlockSetContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LPAREN(self): + return self.getToken(ANTLRv4Parser.LPAREN, 0) + + def setElement(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.SetElementContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.SetElementContext,i) + + + def RPAREN(self): + return self.getToken(ANTLRv4Parser.RPAREN, 0) + + def OR(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.OR) + else: + return self.getToken(ANTLRv4Parser.OR, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_blockSet + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterBlockSet(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitBlockSet(self) + + + + + def blockSet(self): + + localctx = ANTLRv4Parser.BlockSetContext(self, self._ctx, self.state) + self.enterRule(localctx, 108, 
self.RULE_blockSet) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 558 + self.match(ANTLRv4Parser.LPAREN) + self.state = 559 + self.setElement() + self.state = 564 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.OR: + self.state = 560 + self.match(ANTLRv4Parser.OR) + self.state = 561 + self.setElement() + self.state = 566 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 567 + self.match(ANTLRv4Parser.RPAREN) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class SetElementContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def TOKEN_REF(self): + return self.getToken(ANTLRv4Parser.TOKEN_REF, 0) + + def elementOptions(self): + return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0) + + + def STRING_LITERAL(self): + return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0) + + def characterRange(self): + return self.getTypedRuleContext(ANTLRv4Parser.CharacterRangeContext,0) + + + def LEXER_CHAR_SET(self): + return self.getToken(ANTLRv4Parser.LEXER_CHAR_SET, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_setElement + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterSetElement(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitSetElement(self) + + + + + def setElement(self): + + localctx = ANTLRv4Parser.SetElementContext(self, self._ctx, self.state) + self.enterRule(localctx, 110, self.RULE_setElement) + self._la = 0 # Token type + try: + self.state = 579 + la_ = self._interp.adaptivePredict(self._input,73,self._ctx) + if la_ == 1: + 
self.enterOuterAlt(localctx, 1) + self.state = 569 + self.match(ANTLRv4Parser.TOKEN_REF) + self.state = 571 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 570 + self.elementOptions() + + + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 573 + self.match(ANTLRv4Parser.STRING_LITERAL) + self.state = 575 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 574 + self.elementOptions() + + + pass + + elif la_ == 3: + self.enterOuterAlt(localctx, 3) + self.state = 577 + self.characterRange() + pass + + elif la_ == 4: + self.enterOuterAlt(localctx, 4) + self.state = 578 + self.match(ANTLRv4Parser.LEXER_CHAR_SET) + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class BlockContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LPAREN(self): + return self.getToken(ANTLRv4Parser.LPAREN, 0) + + def altList(self): + return self.getTypedRuleContext(ANTLRv4Parser.AltListContext,0) + + + def RPAREN(self): + return self.getToken(ANTLRv4Parser.RPAREN, 0) + + def COLON(self): + return self.getToken(ANTLRv4Parser.COLON, 0) + + def optionsSpec(self): + return self.getTypedRuleContext(ANTLRv4Parser.OptionsSpecContext,0) + + + def ruleAction(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.RuleActionContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.RuleActionContext,i) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_block + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterBlock(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitBlock(self) + + + 
+ + def block(self): + + localctx = ANTLRv4Parser.BlockContext(self, self._ctx, self.state) + self.enterRule(localctx, 112, self.RULE_block) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 581 + self.match(ANTLRv4Parser.LPAREN) + self.state = 592 + _la = self._input.LA(1) + if (((_la) & ~0x3f) == 0 and ((1 << _la) & ((1 << ANTLRv4Parser.OPTIONS) | (1 << ANTLRv4Parser.COLON) | (1 << ANTLRv4Parser.AT))) != 0): + self.state = 583 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.OPTIONS: + self.state = 582 + self.optionsSpec() + + + self.state = 588 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.AT: + self.state = 585 + self.ruleAction() + self.state = 590 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 591 + self.match(ANTLRv4Parser.COLON) + + + self.state = 594 + self.altList() + self.state = 595 + self.match(ANTLRv4Parser.RPAREN) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class RulerefContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def RULE_REF(self): + return self.getToken(ANTLRv4Parser.RULE_REF, 0) + + def argActionBlock(self): + return self.getTypedRuleContext(ANTLRv4Parser.ArgActionBlockContext,0) + + + def elementOptions(self): + return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0) + + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_ruleref + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterRuleref(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitRuleref(self) + + + + + def ruleref(self): + + localctx = 
ANTLRv4Parser.RulerefContext(self, self._ctx, self.state) + self.enterRule(localctx, 114, self.RULE_ruleref) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 597 + self.match(ANTLRv4Parser.RULE_REF) + self.state = 599 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.BEGIN_ARGUMENT: + self.state = 598 + self.argActionBlock() + + + self.state = 602 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 601 + self.elementOptions() + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class CharacterRangeContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def STRING_LITERAL(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.STRING_LITERAL) + else: + return self.getToken(ANTLRv4Parser.STRING_LITERAL, i) + + def RANGE(self): + return self.getToken(ANTLRv4Parser.RANGE, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_characterRange + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterCharacterRange(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitCharacterRange(self) + + + + + def characterRange(self): + + localctx = ANTLRv4Parser.CharacterRangeContext(self, self._ctx, self.state) + self.enterRule(localctx, 116, self.RULE_characterRange) + try: + self.enterOuterAlt(localctx, 1) + self.state = 604 + self.match(ANTLRv4Parser.STRING_LITERAL) + self.state = 605 + self.match(ANTLRv4Parser.RANGE) + self.state = 606 + self.match(ANTLRv4Parser.STRING_LITERAL) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + 
self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class TerminalContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def TOKEN_REF(self): + return self.getToken(ANTLRv4Parser.TOKEN_REF, 0) + + def elementOptions(self): + return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionsContext,0) + + + def STRING_LITERAL(self): + return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_terminal + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterTerminal(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitTerminal(self) + + + + + def terminal(self): + + localctx = ANTLRv4Parser.TerminalContext(self, self._ctx, self.state) + self.enterRule(localctx, 118, self.RULE_terminal) + self._la = 0 # Token type + try: + self.state = 616 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF]: + self.enterOuterAlt(localctx, 1) + self.state = 608 + self.match(ANTLRv4Parser.TOKEN_REF) + self.state = 610 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 609 + self.elementOptions() + + + + elif token in [ANTLRv4Parser.STRING_LITERAL]: + self.enterOuterAlt(localctx, 2) + self.state = 612 + self.match(ANTLRv4Parser.STRING_LITERAL) + self.state = 614 + _la = self._input.LA(1) + if _la==ANTLRv4Parser.LT: + self.state = 613 + self.elementOptions() + + + + else: + raise NoViableAltException(self) + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ElementOptionsContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, 
invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def LT(self): + return self.getToken(ANTLRv4Parser.LT, 0) + + def elementOption(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.ElementOptionContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.ElementOptionContext,i) + + + def GT(self): + return self.getToken(ANTLRv4Parser.GT, 0) + + def COMMA(self, i:int=None): + if i is None: + return self.getTokens(ANTLRv4Parser.COMMA) + else: + return self.getToken(ANTLRv4Parser.COMMA, i) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_elementOptions + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterElementOptions(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitElementOptions(self) + + + + + def elementOptions(self): + + localctx = ANTLRv4Parser.ElementOptionsContext(self, self._ctx, self.state) + self.enterRule(localctx, 120, self.RULE_elementOptions) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 618 + self.match(ANTLRv4Parser.LT) + self.state = 619 + self.elementOption() + self.state = 624 + self._errHandler.sync(self) + _la = self._input.LA(1) + while _la==ANTLRv4Parser.COMMA: + self.state = 620 + self.match(ANTLRv4Parser.COMMA) + self.state = 621 + self.elementOption() + self.state = 626 + self._errHandler.sync(self) + _la = self._input.LA(1) + + self.state = 627 + self.match(ANTLRv4Parser.GT) + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class ElementOptionContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def 
identifier(self, i:int=None): + if i is None: + return self.getTypedRuleContexts(ANTLRv4Parser.IdentifierContext) + else: + return self.getTypedRuleContext(ANTLRv4Parser.IdentifierContext,i) + + + def ASSIGN(self): + return self.getToken(ANTLRv4Parser.ASSIGN, 0) + + def STRING_LITERAL(self): + return self.getToken(ANTLRv4Parser.STRING_LITERAL, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_elementOption + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterElementOption(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitElementOption(self) + + + + + def elementOption(self): + + localctx = ANTLRv4Parser.ElementOptionContext(self, self._ctx, self.state) + self.enterRule(localctx, 122, self.RULE_elementOption) + try: + self.state = 636 + la_ = self._interp.adaptivePredict(self._input,84,self._ctx) + if la_ == 1: + self.enterOuterAlt(localctx, 1) + self.state = 629 + self.identifier() + pass + + elif la_ == 2: + self.enterOuterAlt(localctx, 2) + self.state = 630 + self.identifier() + self.state = 631 + self.match(ANTLRv4Parser.ASSIGN) + self.state = 634 + token = self._input.LA(1) + if token in [ANTLRv4Parser.TOKEN_REF, ANTLRv4Parser.RULE_REF]: + self.state = 632 + self.identifier() + + elif token in [ANTLRv4Parser.STRING_LITERAL]: + self.state = 633 + self.match(ANTLRv4Parser.STRING_LITERAL) + + else: + raise NoViableAltException(self) + + pass + + + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + class IdentifierContext(ParserRuleContext): + + def __init__(self, parser, parent:ParserRuleContext=None, invokingState:int=-1): + super().__init__(parent, invokingState) + self.parser = parser + + def RULE_REF(self): + return self.getToken(ANTLRv4Parser.RULE_REF, 0) + + def 
TOKEN_REF(self): + return self.getToken(ANTLRv4Parser.TOKEN_REF, 0) + + def getRuleIndex(self): + return ANTLRv4Parser.RULE_identifier + + def enterRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.enterIdentifier(self) + + def exitRule(self, listener:ParseTreeListener): + if isinstance( listener, ANTLRv4ParserListener ): + listener.exitIdentifier(self) + + + + + def identifier(self): + + localctx = ANTLRv4Parser.IdentifierContext(self, self._ctx, self.state) + self.enterRule(localctx, 124, self.RULE_identifier) + self._la = 0 # Token type + try: + self.enterOuterAlt(localctx, 1) + self.state = 638 + _la = self._input.LA(1) + if not(_la==ANTLRv4Parser.TOKEN_REF or _la==ANTLRv4Parser.RULE_REF): + self._errHandler.recoverInline(self) + else: + self.consume() + except RecognitionException as re: + localctx.exception = re + self._errHandler.reportError(self, re) + self._errHandler.recover(self, re) + finally: + self.exitRule() + return localctx + + + + diff --git a/tools/grammar-analysis/ANTLRv4ParserListener.py b/tools/grammar-analysis/ANTLRv4ParserListener.py new file mode 100644 index 00000000..b2f04416 --- /dev/null +++ b/tools/grammar-analysis/ANTLRv4ParserListener.py @@ -0,0 +1,573 @@ +# Generated from java-escape by ANTLR 4.5 +from antlr4 import * + +# This class defines a complete listener for a parse tree produced by ANTLRv4Parser. +class ANTLRv4ParserListener(ParseTreeListener): + + # Enter a parse tree produced by ANTLRv4Parser#grammarSpec. + def enterGrammarSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#grammarSpec. + def exitGrammarSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#grammarType. + def enterGrammarType(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#grammarType. + def exitGrammarType(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#prequelConstruct. 
+ def enterPrequelConstruct(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#prequelConstruct. + def exitPrequelConstruct(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#optionsSpec. + def enterOptionsSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#optionsSpec. + def exitOptionsSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#option. + def enterOption(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#option. + def exitOption(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#optionValue. + def enterOptionValue(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#optionValue. + def exitOptionValue(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#delegateGrammars. + def enterDelegateGrammars(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#delegateGrammars. + def exitDelegateGrammars(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#delegateGrammar. + def enterDelegateGrammar(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#delegateGrammar. + def exitDelegateGrammar(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#tokensSpec. + def enterTokensSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#tokensSpec. + def exitTokensSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#channelsSpec. + def enterChannelsSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#channelsSpec. + def exitChannelsSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#idList. + def enterIdList(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#idList. + def exitIdList(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#action. 
+ def enterAction(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#action. + def exitAction(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#actionScopeName. + def enterActionScopeName(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#actionScopeName. + def exitActionScopeName(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#actionBlock. + def enterActionBlock(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#actionBlock. + def exitActionBlock(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#argActionBlock. + def enterArgActionBlock(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#argActionBlock. + def exitArgActionBlock(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#modeSpec. + def enterModeSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#modeSpec. + def exitModeSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#rules. + def enterRules(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#rules. + def exitRules(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleSpec. + def enterRuleSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleSpec. + def exitRuleSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#parserRuleSpec. + def enterParserRuleSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#parserRuleSpec. + def exitParserRuleSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#exceptionGroup. + def enterExceptionGroup(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#exceptionGroup. + def exitExceptionGroup(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#exceptionHandler. 
+ def enterExceptionHandler(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#exceptionHandler. + def exitExceptionHandler(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#finallyClause. + def enterFinallyClause(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#finallyClause. + def exitFinallyClause(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#rulePrequel. + def enterRulePrequel(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#rulePrequel. + def exitRulePrequel(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleReturns. + def enterRuleReturns(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleReturns. + def exitRuleReturns(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#throwsSpec. + def enterThrowsSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#throwsSpec. + def exitThrowsSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#localsSpec. + def enterLocalsSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#localsSpec. + def exitLocalsSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleAction. + def enterRuleAction(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleAction. + def exitRuleAction(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleModifiers. + def enterRuleModifiers(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleModifiers. + def exitRuleModifiers(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleModifier. + def enterRuleModifier(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleModifier. + def exitRuleModifier(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleBlock. 
+ def enterRuleBlock(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleBlock. + def exitRuleBlock(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleAltList. + def enterRuleAltList(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleAltList. + def exitRuleAltList(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#labeledAlt. + def enterLabeledAlt(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#labeledAlt. + def exitLabeledAlt(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerRuleSpec. + def enterLexerRuleSpec(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerRuleSpec. + def exitLexerRuleSpec(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerRuleBlock. + def enterLexerRuleBlock(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerRuleBlock. + def exitLexerRuleBlock(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerAltList. + def enterLexerAltList(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerAltList. + def exitLexerAltList(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerAlt. + def enterLexerAlt(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerAlt. + def exitLexerAlt(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerElements. + def enterLexerElements(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerElements. + def exitLexerElements(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerElement. + def enterLexerElement(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerElement. + def exitLexerElement(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#labeledLexerElement. 
+ def enterLabeledLexerElement(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#labeledLexerElement. + def exitLabeledLexerElement(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerBlock. + def enterLexerBlock(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerBlock. + def exitLexerBlock(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerCommands. + def enterLexerCommands(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerCommands. + def exitLexerCommands(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerCommand. + def enterLexerCommand(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerCommand. + def exitLexerCommand(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerCommandName. + def enterLexerCommandName(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerCommandName. + def exitLexerCommandName(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerCommandExpr. + def enterLexerCommandExpr(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerCommandExpr. + def exitLexerCommandExpr(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#altList. + def enterAltList(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#altList. + def exitAltList(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#alternative. + def enterAlternative(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#alternative. + def exitAlternative(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#element. + def enterElement(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#element. + def exitElement(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#labeledElement. 
+ def enterLabeledElement(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#labeledElement. + def exitLabeledElement(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ebnf. + def enterEbnf(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ebnf. + def exitEbnf(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#blockSuffix. + def enterBlockSuffix(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#blockSuffix. + def exitBlockSuffix(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ebnfSuffix. + def enterEbnfSuffix(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ebnfSuffix. + def exitEbnfSuffix(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#lexerAtom. + def enterLexerAtom(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#lexerAtom. + def exitLexerAtom(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#atom. + def enterAtom(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#atom. + def exitAtom(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#notSet. + def enterNotSet(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#notSet. + def exitNotSet(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#blockSet. + def enterBlockSet(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#blockSet. + def exitBlockSet(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#setElement. + def enterSetElement(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#setElement. + def exitSetElement(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#block. + def enterBlock(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#block. + def exitBlock(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#ruleref. 
+ def enterRuleref(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#ruleref. + def exitRuleref(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#characterRange. + def enterCharacterRange(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#characterRange. + def exitCharacterRange(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#terminal. + def enterTerminal(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#terminal. + def exitTerminal(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#elementOptions. + def enterElementOptions(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#elementOptions. + def exitElementOptions(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#elementOption. + def enterElementOption(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#elementOption. + def exitElementOption(self, ctx): + pass + + + # Enter a parse tree produced by ANTLRv4Parser#identifier. + def enterIdentifier(self, ctx): + pass + + # Exit a parse tree produced by ANTLRv4Parser#identifier. 
+ def exitIdentifier(self, ctx): + pass + + diff --git a/tools/grammar-analysis/LexBasic.py b/tools/grammar-analysis/LexBasic.py new file mode 100644 index 00000000..6096304d --- /dev/null +++ b/tools/grammar-analysis/LexBasic.py @@ -0,0 +1,175 @@ +# Generated from java-escape by ANTLR 4.5 +from antlr4 import * +from io import StringIO + + +def serializedATN(): + with StringIO() as buf: + buf.write("\3\u0430\ud6d1\u8206\uad2d\u4417\uaef1\u8d80\uaadd\2\2") + buf.write("\u0130\b\1\4\2\t\2\4\3\t\3\4\4\t\4\4\5\t\5\4\6\t\6\4\7") + buf.write("\t\7\4\b\t\b\4\t\t\t\4\n\t\n\4\13\t\13\4\f\t\f\4\r\t\r") + buf.write("\4\16\t\16\4\17\t\17\4\20\t\20\4\21\t\21\4\22\t\22\4\23") + buf.write("\t\23\4\24\t\24\4\25\t\25\4\26\t\26\4\27\t\27\4\30\t\30") + buf.write("\4\31\t\31\4\32\t\32\4\33\t\33\4\34\t\34\4\35\t\35\4\36") + buf.write("\t\36\4\37\t\37\4 \t \4!\t!\4\"\t\"\4#\t#\4$\t$\4%\t%") + buf.write("\4&\t&\4\'\t\'\4(\t(\4)\t)\4*\t*\4+\t+\4,\t,\4-\t-\4.") + buf.write("\t.\4/\t/\4\60\t\60\4\61\t\61\4\62\t\62\3\2\3\2\5\2h\n") + buf.write("\2\3\3\3\3\3\4\3\4\3\5\3\5\3\5\3\5\7\5r\n\5\f\5\16\5u") + buf.write("\13\5\3\5\3\5\3\5\5\5z\n\5\3\6\3\6\3\6\3\6\3\6\7\6\u0081") + buf.write("\n\6\f\6\16\6\u0084\13\6\3\6\3\6\3\6\5\6\u0089\n\6\3\7") + buf.write("\3\7\3\7\3\7\7\7\u008f\n\7\f\7\16\7\u0092\13\7\3\b\3\b") + buf.write("\3\b\3\b\3\b\5\b\u0099\n\b\3\t\3\t\3\t\3\n\3\n\3\n\3\n") + buf.write("\3\n\5\n\u00a3\n\n\5\n\u00a5\n\n\5\n\u00a7\n\n\5\n\u00a9") + buf.write("\n\n\3\13\3\13\3\13\7\13\u00ae\n\13\f\13\16\13\u00b1\13") + buf.write("\13\5\13\u00b3\n\13\3\f\3\f\3\r\3\r\3\16\3\16\3\16\3\16") + buf.write("\3\16\3\16\3\16\3\16\3\16\5\16\u00c2\n\16\3\17\3\17\3") + buf.write("\17\5\17\u00c7\n\17\3\17\3\17\3\20\3\20\3\20\7\20\u00ce") + buf.write("\n\20\f\20\16\20\u00d1\13\20\3\20\3\20\3\21\3\21\3\21") + buf.write("\7\21\u00d8\n\21\f\21\16\21\u00db\13\21\3\21\3\21\3\22") + buf.write("\3\22\3\22\7\22\u00e2\n\22\f\22\16\22\u00e5\13\22\3\23") + 
buf.write("\3\23\3\23\3\23\5\23\u00eb\n\23\3\24\3\24\3\25\3\25\3") + buf.write("\25\3\25\3\26\3\26\3\27\3\27\3\30\3\30\3\30\3\31\3\31") + buf.write("\3\32\3\32\3\33\3\33\3\34\3\34\3\35\3\35\3\36\3\36\3\37") + buf.write("\3\37\3 \3 \3!\3!\3!\3\"\3\"\3#\3#\3$\3$\3%\3%\3&\3&\3") + buf.write("\'\3\'\3(\3(\3(\3)\3)\3*\3*\3+\3+\3,\3,\3-\3-\3.\3.\3") + buf.write("/\3/\3/\3\60\3\60\3\61\3\61\3\62\3\62\4s\u0082\2\63\3") + buf.write("\2\5\2\7\2\t\2\13\2\r\2\17\2\21\2\23\2\25\2\27\2\31\2") + buf.write("\33\2\35\2\37\2!\2#\2%\2\'\2)\2+\2-\2/\2\61\2\63\2\65") + buf.write("\2\67\29\2;\2=\2?\2A\2C\2E\2G\2I\2K\2M\2O\2Q\2S\2U\2W") + buf.write("\2Y\2[\2]\2_\2a\2c\2\3\2\r\4\2\13\13\"\"\4\2\f\f\16\17") + buf.write("\4\2\f\f\17\17\n\2$$))^^ddhhppttvv\3\2\63;\5\2\62;CHc") + buf.write("h\3\2\62;\6\2\f\f\17\17))^^\6\2\f\f\17\17$$^^\5\2\u00b9") + buf.write("\u00b9\u0302\u0371\u2041\u2042\17\2C\\c|\u00c2\u00d8\u00da") + buf.write("\u00f8\u00fa\u0301\u0372\u037f\u0381\u2001\u200e\u200f") + buf.write("\u2072\u2191\u2c02\u2ff1\u3003\ud801\uf902\ufdd1\ufdf2") + buf.write("\uffff\u0118\3g\3\2\2\2\5i\3\2\2\2\7k\3\2\2\2\tm\3\2\2") + buf.write("\2\13{\3\2\2\2\r\u008a\3\2\2\2\17\u0093\3\2\2\2\21\u009a") + buf.write("\3\2\2\2\23\u009d\3\2\2\2\25\u00b2\3\2\2\2\27\u00b4\3") + buf.write("\2\2\2\31\u00b6\3\2\2\2\33\u00c1\3\2\2\2\35\u00c3\3\2") + buf.write("\2\2\37\u00ca\3\2\2\2!\u00d4\3\2\2\2#\u00de\3\2\2\2%\u00ea") + buf.write("\3\2\2\2\'\u00ec\3\2\2\2)\u00ee\3\2\2\2+\u00f2\3\2\2\2") + buf.write("-\u00f4\3\2\2\2/\u00f6\3\2\2\2\61\u00f9\3\2\2\2\63\u00fb") + buf.write("\3\2\2\2\65\u00fd\3\2\2\2\67\u00ff\3\2\2\29\u0101\3\2") + buf.write("\2\2;\u0103\3\2\2\2=\u0105\3\2\2\2?\u0107\3\2\2\2A\u0109") + buf.write("\3\2\2\2C\u010c\3\2\2\2E\u010e\3\2\2\2G\u0110\3\2\2\2") + buf.write("I\u0112\3\2\2\2K\u0114\3\2\2\2M\u0116\3\2\2\2O\u0118\3") + buf.write("\2\2\2Q\u011b\3\2\2\2S\u011d\3\2\2\2U\u011f\3\2\2\2W\u0121") + buf.write("\3\2\2\2Y\u0123\3\2\2\2[\u0125\3\2\2\2]\u0127\3\2\2\2") + 
buf.write("_\u012a\3\2\2\2a\u012c\3\2\2\2c\u012e\3\2\2\2eh\5\5\3") + buf.write("\2fh\5\7\4\2ge\3\2\2\2gf\3\2\2\2h\4\3\2\2\2ij\t\2\2\2") + buf.write("j\6\3\2\2\2kl\t\3\2\2l\b\3\2\2\2mn\7\61\2\2no\7,\2\2o") + buf.write("s\3\2\2\2pr\13\2\2\2qp\3\2\2\2ru\3\2\2\2st\3\2\2\2sq\3") + buf.write("\2\2\2ty\3\2\2\2us\3\2\2\2vw\7,\2\2wz\7\61\2\2xz\7\2\2") + buf.write("\3yv\3\2\2\2yx\3\2\2\2z\n\3\2\2\2{|\7\61\2\2|}\7,\2\2") + buf.write("}~\7,\2\2~\u0082\3\2\2\2\177\u0081\13\2\2\2\u0080\177") + buf.write("\3\2\2\2\u0081\u0084\3\2\2\2\u0082\u0083\3\2\2\2\u0082") + buf.write("\u0080\3\2\2\2\u0083\u0088\3\2\2\2\u0084\u0082\3\2\2\2") + buf.write("\u0085\u0086\7,\2\2\u0086\u0089\7\61\2\2\u0087\u0089\7") + buf.write("\2\2\3\u0088\u0085\3\2\2\2\u0088\u0087\3\2\2\2\u0089\f") + buf.write("\3\2\2\2\u008a\u008b\7\61\2\2\u008b\u008c\7\61\2\2\u008c") + buf.write("\u0090\3\2\2\2\u008d\u008f\n\4\2\2\u008e\u008d\3\2\2\2") + buf.write("\u008f\u0092\3\2\2\2\u0090\u008e\3\2\2\2\u0090\u0091\3") + buf.write("\2\2\2\u0091\16\3\2\2\2\u0092\u0090\3\2\2\2\u0093\u0098") + buf.write("\5+\26\2\u0094\u0099\t\5\2\2\u0095\u0099\5\23\n\2\u0096") + buf.write("\u0099\13\2\2\2\u0097\u0099\7\2\2\3\u0098\u0094\3\2\2") + buf.write("\2\u0098\u0095\3\2\2\2\u0098\u0096\3\2\2\2\u0098\u0097") + buf.write("\3\2\2\2\u0099\20\3\2\2\2\u009a\u009b\5+\26\2\u009b\u009c") + buf.write("\13\2\2\2\u009c\22\3\2\2\2\u009d\u00a8\7w\2\2\u009e\u00a6") + buf.write("\5\27\f\2\u009f\u00a4\5\27\f\2\u00a0\u00a2\5\27\f\2\u00a1") + buf.write("\u00a3\5\27\f\2\u00a2\u00a1\3\2\2\2\u00a2\u00a3\3\2\2") + buf.write("\2\u00a3\u00a5\3\2\2\2\u00a4\u00a0\3\2\2\2\u00a4\u00a5") + buf.write("\3\2\2\2\u00a5\u00a7\3\2\2\2\u00a6\u009f\3\2\2\2\u00a6") + buf.write("\u00a7\3\2\2\2\u00a7\u00a9\3\2\2\2\u00a8\u009e\3\2\2\2") + buf.write("\u00a8\u00a9\3\2\2\2\u00a9\24\3\2\2\2\u00aa\u00b3\7\62") + buf.write("\2\2\u00ab\u00af\t\6\2\2\u00ac\u00ae\5\31\r\2\u00ad\u00ac") + buf.write("\3\2\2\2\u00ae\u00b1\3\2\2\2\u00af\u00ad\3\2\2\2\u00af") + 
buf.write("\u00b0\3\2\2\2\u00b0\u00b3\3\2\2\2\u00b1\u00af\3\2\2\2") + buf.write("\u00b2\u00aa\3\2\2\2\u00b2\u00ab\3\2\2\2\u00b3\26\3\2") + buf.write("\2\2\u00b4\u00b5\t\7\2\2\u00b5\30\3\2\2\2\u00b6\u00b7") + buf.write("\t\b\2\2\u00b7\32\3\2\2\2\u00b8\u00b9\7v\2\2\u00b9\u00ba") + buf.write("\7t\2\2\u00ba\u00bb\7w\2\2\u00bb\u00c2\7g\2\2\u00bc\u00bd") + buf.write("\7h\2\2\u00bd\u00be\7c\2\2\u00be\u00bf\7n\2\2\u00bf\u00c0") + buf.write("\7u\2\2\u00c0\u00c2\7g\2\2\u00c1\u00b8\3\2\2\2\u00c1\u00bc") + buf.write("\3\2\2\2\u00c2\34\3\2\2\2\u00c3\u00c6\5\61\31\2\u00c4") + buf.write("\u00c7\5\17\b\2\u00c5\u00c7\n\t\2\2\u00c6\u00c4\3\2\2") + buf.write("\2\u00c6\u00c5\3\2\2\2\u00c7\u00c8\3\2\2\2\u00c8\u00c9") + buf.write("\5\61\31\2\u00c9\36\3\2\2\2\u00ca\u00cf\5\61\31\2\u00cb") + buf.write("\u00ce\5\17\b\2\u00cc\u00ce\n\t\2\2\u00cd\u00cb\3\2\2") + buf.write("\2\u00cd\u00cc\3\2\2\2\u00ce\u00d1\3\2\2\2\u00cf\u00cd") + buf.write("\3\2\2\2\u00cf\u00d0\3\2\2\2\u00d0\u00d2\3\2\2\2\u00d1") + buf.write("\u00cf\3\2\2\2\u00d2\u00d3\5\61\31\2\u00d3 \3\2\2\2\u00d4") + buf.write("\u00d9\5\63\32\2\u00d5\u00d8\5\17\b\2\u00d6\u00d8\n\n") + buf.write("\2\2\u00d7\u00d5\3\2\2\2\u00d7\u00d6\3\2\2\2\u00d8\u00db") + buf.write("\3\2\2\2\u00d9\u00d7\3\2\2\2\u00d9\u00da\3\2\2\2\u00da") + buf.write("\u00dc\3\2\2\2\u00db\u00d9\3\2\2\2\u00dc\u00dd\5\63\32") + buf.write("\2\u00dd\"\3\2\2\2\u00de\u00e3\5\61\31\2\u00df\u00e2\5") + buf.write("\17\b\2\u00e0\u00e2\n\t\2\2\u00e1\u00df\3\2\2\2\u00e1") + buf.write("\u00e0\3\2\2\2\u00e2\u00e5\3\2\2\2\u00e3\u00e1\3\2\2\2") + buf.write("\u00e3\u00e4\3\2\2\2\u00e4$\3\2\2\2\u00e5\u00e3\3\2\2") + buf.write("\2\u00e6\u00eb\5\'\24\2\u00e7\u00eb\4\62;\2\u00e8\u00eb") + buf.write("\5Q)\2\u00e9\u00eb\t\13\2\2\u00ea\u00e6\3\2\2\2\u00ea") + buf.write("\u00e7\3\2\2\2\u00ea\u00e8\3\2\2\2\u00ea\u00e9\3\2\2\2") + buf.write("\u00eb&\3\2\2\2\u00ec\u00ed\t\f\2\2\u00ed(\3\2\2\2\u00ee") + buf.write("\u00ef\7k\2\2\u00ef\u00f0\7p\2\2\u00f0\u00f1\7v\2\2\u00f1") + 
buf.write("*\3\2\2\2\u00f2\u00f3\7^\2\2\u00f3,\3\2\2\2\u00f4\u00f5") + buf.write("\7<\2\2\u00f5.\3\2\2\2\u00f6\u00f7\7<\2\2\u00f7\u00f8") + buf.write("\7<\2\2\u00f8\60\3\2\2\2\u00f9\u00fa\7)\2\2\u00fa\62\3") + buf.write("\2\2\2\u00fb\u00fc\7$\2\2\u00fc\64\3\2\2\2\u00fd\u00fe") + buf.write("\7*\2\2\u00fe\66\3\2\2\2\u00ff\u0100\7+\2\2\u01008\3\2") + buf.write("\2\2\u0101\u0102\7}\2\2\u0102:\3\2\2\2\u0103\u0104\7\177") + buf.write("\2\2\u0104<\3\2\2\2\u0105\u0106\7]\2\2\u0106>\3\2\2\2") + buf.write("\u0107\u0108\7_\2\2\u0108@\3\2\2\2\u0109\u010a\7/\2\2") + buf.write("\u010a\u010b\7@\2\2\u010bB\3\2\2\2\u010c\u010d\7>\2\2") + buf.write("\u010dD\3\2\2\2\u010e\u010f\7@\2\2\u010fF\3\2\2\2\u0110") + buf.write("\u0111\7?\2\2\u0111H\3\2\2\2\u0112\u0113\7A\2\2\u0113") + buf.write("J\3\2\2\2\u0114\u0115\7,\2\2\u0115L\3\2\2\2\u0116\u0117") + buf.write("\7-\2\2\u0117N\3\2\2\2\u0118\u0119\7-\2\2\u0119\u011a") + buf.write("\7?\2\2\u011aP\3\2\2\2\u011b\u011c\7a\2\2\u011cR\3\2\2") + buf.write("\2\u011d\u011e\7~\2\2\u011eT\3\2\2\2\u011f\u0120\7&\2") + buf.write("\2\u0120V\3\2\2\2\u0121\u0122\7.\2\2\u0122X\3\2\2\2\u0123") + buf.write("\u0124\7=\2\2\u0124Z\3\2\2\2\u0125\u0126\7\60\2\2\u0126") + buf.write("\\\3\2\2\2\u0127\u0128\7\60\2\2\u0128\u0129\7\60\2\2\u0129") + buf.write("^\3\2\2\2\u012a\u012b\7B\2\2\u012b`\3\2\2\2\u012c\u012d") + buf.write("\7%\2\2\u012db\3\2\2\2\u012e\u012f\7\u0080\2\2\u012fd") + buf.write("\3\2\2\2\31\2gsy\u0082\u0088\u0090\u0098\u00a2\u00a4\u00a6") + buf.write("\u00a8\u00af\u00b2\u00c1\u00c6\u00cd\u00cf\u00d7\u00d9") + buf.write("\u00e1\u00e3\u00ea\2") + return buf.getvalue() + + +class LexBasic(Lexer): + + atn = ATNDeserializer().deserialize(serializedATN()) + + decisionsToDFA = [ DFA(ds, i) for i, ds in enumerate(atn.decisionToState) ] + + + + modeNames = [ u"DEFAULT_MODE" ] + + literalNames = [ u"", + ] + + symbolicNames = [ u"", + ] + + ruleNames = [ "Ws", "Hws", "Vws", "BlockComment", "DocComment", "LineComment", + "EscSeq", "EscAny", "UnicodeEsc", 
from antlr4 import *


class LexerAdaptor(Lexer):
    """Base class for the generated ANTLRv4 lexer that tracks rule context.

    ``_currentRuleType == Token.INVALID_TYPE`` means we are outside of any
    rule.  At the first sign of a rule-name reference while outside a rule we
    assume a parser rule is starting; likewise, seeing a token reference when
    not already in a rule starts a token (lexer) rule.  The terminating ';'
    of a rule flips the state back to the invalid type.

    This logic is not perfect but works in practice.  For example,
    "grammar T;" briefly starts and stops a lexical rule for the "T;" part —
    dangerous, but it works.

    The whole point of this state is to distinguish [..arg actions..] from
    [charsets]: char sets can only occur in lexical rules, where arg actions
    cannot occur.
    """

    # Class-level default; the assignments below shadow it with an instance
    # attribute on first write.
    _currentRuleType = Token.INVALID_TYPE

    def __init__(self, inp):
        super().__init__(inp)

    def getCurrentRuleType(self):
        """Return the token type of the rule we are currently inside."""
        return self._currentRuleType

    def setCurrentRuleType(self, ruleType):
        """Force the tracked rule context to *ruleType*."""
        self._currentRuleType = ruleType

    def handleBeginArgument(self):
        """On '[': open a char-set in lexer rules, an argument block otherwise."""
        if not self.inLexerRule():
            self.pushMode(self.Argument)
            return
        self.pushMode(self.LexerCharSet)
        self.more()

    def handleEndArgument(self):
        """On ']': leave the argument mode, retyping when still nested."""
        self.popMode()
        if self._modeStack:
            self.setType(self.ARGUMENT_CONTENT)

    def handleEndAction(self):
        """On '}': leave the action mode, retyping when still nested."""
        self.popMode()
        if self._modeStack:
            self.setType(self.ACTION_CONTENT)

    def emit(self):
        """Retype bare IDs by capitalization and track rule begin/end."""
        if self._type == self.ID:
            # A single-character peek at the token's first char decides
            # token ref (uppercase) vs. rule ref (lowercase).
            first = self._input.getText(self._tokenStartCharIndex, self._tokenStartCharIndex)
            self._type = self.TOKEN_REF if first[0].isupper() else self.RULE_REF

            if self._currentRuleType == Token.INVALID_TYPE:
                # Outside any rule: this reference begins one.
                self._currentRuleType = self._type
        elif self._type == self.SEMI:
            # ';' terminates the current rule definition.
            self._currentRuleType = Token.INVALID_TYPE
        return Lexer.emit(self)

    def inLexerRule(self):
        """True while lexing the body of a token (lexer) rule."""
        return self._currentRuleType == self.TOKEN_REF

    def inParserRule(self):
        """True while lexing the body of a parser rule (unused; for clarity)."""
        return self._currentRuleType == self.RULE_REF
# ANTLR4 grammar file analysis for Droplet mode creation.
# Copyright (c) Anthony Bau 2016
# MIT License.

import json
import sys

# Sentinel standing for "the empty string" in rule-option sets.  It can never
# collide with a rule name, because rule names are strings.
EMPTY = 0


class DirectedGraphNode():
    """A node in a simple directed graph, keeping both edge directions."""

    def __init__(self, label=None):
        self.label = label
        self.out_edges = set()
        self.in_edges = set()

    def connect_out(self, other):
        """Add an edge self -> other."""
        # Bug fix: the edge collections are sets, so use add(); the original
        # called append(), which raises AttributeError on a set.
        other.in_edges.add(self)
        self.out_edges.add(other)

    def connect_in(self, other):
        """Add an edge other -> self."""
        other.connect_out(self)


class DirectedGraph():
    """A directed graph whose nodes are addressed by label."""

    def __init__(self):
        self.nodes = dict()

    def append(self, label):
        """Create a new, unconnected node with the given label."""
        self.nodes[label] = DirectedGraphNode(label=label)

    def add_connection(self, source, dest):
        """Add an edge from the node labeled *source* to the one labeled *dest*."""
        self.nodes[source].connect_out(self.nodes[dest])


# For this analysis, only the "zeroability" of an EBNF suffix matters:
#
#   QUESTION ('?') -> the element may match nothing
#   STAR     ('*') -> the element may match nothing
#   PLUS     ('+') -> the element matches at least once

class Expression():
    """A set of alternatives (the right-hand side of a rule or block)."""

    def __init__(self, alt_list):
        self.alternatives = [Alternative(alternative) for alternative in alt_list]

    def get_options(self, lookup_table):
        """Return the set of single rule names (or EMPTY) this can reduce to.

        *lookup_table* maps rule name -> whether that rule can match the
        empty string.
        """
        result = set()
        for alternative in self.alternatives:
            result |= alternative.get_options(lookup_table)
        return result


class Alternative():
    """One alternative: a sequence of elements."""

    def __init__(self, alternative):
        self.elements = [Element(el) for el in alternative.filter_children('element')]

    def get_options(self, lookup_table):
        """Return the single rules this alternative can consist of entirely."""
        element_options = [element.get_options(lookup_table) for element in self.elements]

        # Bug fix: an explicitly empty alternative (e.g. "a : b | ;") derives
        # the empty string, so report EMPTY rather than "no options".
        if not element_options:
            return {EMPTY}

        number_zeroable = sum(1 for options in element_options if EMPTY in options)

        if number_zeroable == len(element_options):
            # Every element can be empty, so the whole alternative can
            # collapse to any single one of its elements (or to nothing;
            # EMPTY is already present in each option set).
            result = set()
            for options in element_options:
                result |= options
            return result
        elif number_zeroable == len(element_options) - 1:
            # Exactly one element must appear: the alternative has exactly
            # that element's options.
            for options in element_options:
                if EMPTY not in options:
                    return options
        else:
            # Two or more mandatory elements: never a single rule.
            return set()


class Element():
    """A single element of an alternative, possibly with an EBNF suffix."""

    def __init__(self, element):
        # '?' and '*' suffixes allow the element to match the empty string.
        suffix = element.find_child('ebnfSuffix')
        self.add_zero = (suffix is not None
                         and suffix.children[0].type in ('QUESTION', 'STAR'))
        # NOTE: _extract_child may also set add_zero for a suffixed ebnf.
        self.child = self._extract_child(element)

    def _extract_child(self, element):
        """Locate the nested Atom/Expression this element wraps, if any."""
        labeled = element.find_child('labeledElement')
        if labeled is not None:
            atom = labeled.find_child('atom')
            if atom is not None:
                return Atom(atom)
            block = labeled.find_child('block')
            if block is not None:
                return Expression(block.find_child('altList').filter_children('alternative'))
            return None

        atom = element.find_child('atom')
        if atom is not None:
            return Atom(atom)

        ebnf = element.find_child('ebnf')
        if ebnf is not None:
            # An ebnf block carries its own suffix (blockSuffix : ebnfSuffix).
            block_suffix = ebnf.find_child('blockSuffix')
            if (block_suffix is not None
                    and block_suffix.find_child('ebnfSuffix').children[0].type in ('QUESTION', 'STAR')):
                self.add_zero = True
            # The ebnf contains an additional nested expression.
            return Expression(ebnf.find_child('block').find_child('altList').filter_children('alternative'))

        return None

    def get_options(self, lookup_table):
        """Return the rules this element can be, plus EMPTY if zeroable."""
        if self.child is None:
            return set()
        result = self.child.get_options(lookup_table)
        if self.add_zero:
            result.add(EMPTY)
        return result


class Atom():
    """A leaf: either a reference to another parser rule, or anything else."""

    def __init__(self, atom):
        ruleref = atom.find_child('ruleref')
        self.ruleref = (ruleref.find_child('RULE_REF').text
                        if ruleref is not None else None)

    def get_options(self, lookup_table):
        """Return {rule} (plus EMPTY if the rule can be empty), or set()."""
        if self.ruleref is None:
            # Terminals, char sets, etc. are never "a single rule".
            return set()
        # .get() guards against references to rules not defined in this file
        # (e.g. imported grammars); treat those as non-empty.
        if lookup_table.get(self.ruleref, False):
            return {self.ruleref, EMPTY}
        return {self.ruleref}


def process(grammarSpec):
    """Compute, for each parser rule, the set of single rules it can be.

    Returns a dict mapping rule name -> set of rule names, with EMPTY
    included when the rule can match the empty string.
    """
    # Guard: a spec without a 'rules' node has nothing to analyze (the
    # original left `rules` unbound in that case).
    rules = grammarSpec.find_child('rules')
    if rules is None:
        return dict()

    # Collect the parser rule names and their right-hand-side expressions.
    rule_names = set()
    rule_expressions = dict()
    for rule in rules.children:
        if rule.children[0].type == 'parserRuleSpec':
            parser_rule = rule.children[0]

            rule_name = parser_rule.find_child('RULE_REF').text
            rule_names.add(rule_name)

            alt_list = parser_rule.find_child('ruleBlock').find_child('ruleAltList')
            rule_expressions[rule_name] = Expression(
                [el.find_child('alternative') for el in alt_list.filter_children('labeledAlt')])

    # lookup_table[name] becomes True once we learn the rule can be empty.
    lookup_table = {rule_name: False for rule_name in rule_names}
    connections = {rule_name: set() for rule_name in rule_names}

    # Fixed-point iteration: learning that one rule can be empty may add
    # options to rules that reference it, so refresh the emptiness table
    # inside the loop and keep going until nothing changes.
    changed = True
    while changed:
        changed = False
        for rule_name in rule_names:
            new_connections = rule_expressions[rule_name].get_options(lookup_table)
            if new_connections != connections[rule_name]:
                changed = True
            connections[rule_name] = new_connections

        for rule_name in rule_names:
            if EMPTY in connections[rule_name]:
                lookup_table[rule_name] = True

    return connections


class Node():
    """A plain (type, children) parse-tree node, decoupled from antlr4."""

    def __init__(self, type, children):
        # `type` shadows the builtin but is kept for caller compatibility.
        self.type = type
        self.children = children

    def dictify(self):
        """Recursively convert the subtree to plain dicts (for debugging)."""
        return {"type": self.type,
                "children": [child.dictify() for child in self.children]}

    def find_child(self, type):
        """Return the first direct child with the given type, or None."""
        for child in self.children:
            if child.type == type:
                return child
        return None

    def filter_children(self, type):
        """Return all direct children with the given type, in order."""
        return [child for child in self.children if child.type == type]


class Token():
    """A plain (type, text) parse-tree leaf, decoupled from antlr4's Token."""

    def __init__(self, type, text):
        self.type = type
        self.text = text

    def dictify(self):
        return {"type": self.type, "text": self.text}


def format(tree):
    """Convert an antlr4 parse tree into plain Node/Token objects.

    (`format` shadows the builtin but is kept for caller compatibility.)
    """
    # Deferred import: only this bridge needs the antlr4 runtime, so the
    # analysis classes above stay importable without it.
    from antlr4 import ParserRuleContext

    if isinstance(tree, ParserRuleContext):
        return Node(
            tree.parser.ruleNames[tree.getRuleIndex()],
            [format(child) for child in tree.getChildren()],
        )
    return Token(
        tree.parentCtx.parser.symbolicNames[tree.symbol.type],
        tree.symbol.text,
    )


def main(argv):
    """Parse the grammar file named in argv[1] and print the analysis JSON."""
    # Deferred imports, same reason as in format().
    from antlr4 import FileStream, CommonTokenStream
    from ANTLRv4Lexer import ANTLRv4Lexer
    from ANTLRv4Parser import ANTLRv4Parser

    grammar_input = FileStream(argv[1])
    lexer = ANTLRv4Lexer(grammar_input)
    stream = CommonTokenStream(lexer)
    parser = ANTLRv4Parser(stream)
    parser.buildParseTrees = True
    tree = parser.grammarSpec()

    connections = process(format(tree))

    # key=str lets EMPTY (an int) sort alongside rule names and makes the
    # JSON output deterministic across runs.
    final_json = {rule: sorted(options, key=str)
                  for rule, options in connections.items()}
    print(json.dumps(final_json, indent=2))


if __name__ == "__main__":
    main(sys.argv)
/dev/null +++ b/tools/grammar-analysis/grammar/ANTLRv4Lexer.g4 @@ -0,0 +1,347 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012-2015 Terence Parr + * Copyright (c) 2012-2015 Sam Harwell + * Copyright (c) 2015 Gerald Rosenberg + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +/** + * A grammar for ANTLR v4 implemented using v4 syntax + * + * Modified 2015.06.16 gbr + * -- update for compatibility with Antlr v4.5 + */ + +lexer grammar ANTLRv4Lexer; + +options { + superClass = LexerAdaptor ; +} + +import LexBasic; // Standard set of fragments + +@header { +from LexerAdaptor import LexerAdaptor +} + +tokens { + TOKEN_REF, + RULE_REF, + LEXER_CHAR_SET +} + +channels { + OFF_CHANNEL // non-default channel for whitespace and comments +} + + +// ====================================================== +// Lexer specification +// + +// ------------------------- +// Comments + +DOC_COMMENT + : DocComment + ; + +BLOCK_COMMENT + : BlockComment -> channel(OFF_CHANNEL) + ; + +LINE_COMMENT + : LineComment -> channel(OFF_CHANNEL) + ; + + +// ------------------------- +// Integer +// + +INT : DecimalNumeral + ; + + +// ------------------------- +// Literal string +// +// ANTLR makes no distinction between a single character literal and a +// multi-character string. All literals are single quote delimited and +// may contain unicode escape sequences of the form \uxxxx, where x +// is a valid hexadecimal number (per Unicode standard). + +STRING_LITERAL + : SQuoteLiteral + ; + +UNTERMINATED_STRING_LITERAL + : USQuoteLiteral + ; + + +// ------------------------- +// Arguments +// +// Certain argument lists, such as those specifying call parameters +// to a rule invocation, or input parameters to a rule specification +// are contained within square brackets. + +BEGIN_ARGUMENT + : LBrack { self.handleBeginArgument() } + ; + + +// ------------------------- +// Actions + +BEGIN_ACTION + : LBrace -> pushMode(Action) + ; + + +// ------------------------- +// Keywords +// +// Keywords may not be used as labels for rules or in any other context where +// they would be ambiguous with the keyword vs some other identifier. OPTIONS, +// TOKENS, & CHANNELS blocks are handled idiomatically in dedicated lexical modes. 
+ +OPTIONS : 'options' -> pushMode(Options) ; +TOKENS : 'tokens' -> pushMode(Tokens) ; +CHANNELS : 'channels' -> pushMode(Channels) ; + +IMPORT : 'import' ; +FRAGMENT : 'fragment' ; +LEXER : 'lexer' ; +PARSER : 'parser' ; +GRAMMAR : 'grammar' ; +PROTECTED : 'protected' ; +PUBLIC : 'public' ; +PRIVATE : 'private' ; +RETURNS : 'returns' ; +LOCALS : 'locals' ; +THROWS : 'throws' ; +CATCH : 'catch' ; +FINALLY : 'finally' ; +MODE : 'mode' ; + + +// ------------------------- +// Punctuation + +COLON : Colon ; +COLONCOLON : DColon ; +COMMA : Comma ; +SEMI : Semi ; +LPAREN : LParen ; +RPAREN : RParen ; +LBRACE : LBrace ; +RBRACE : RBrace ; +RARROW : RArrow ; +LT : Lt ; +GT : Gt ; +ASSIGN : Equal ; +QUESTION : Question ; +STAR : Star ; +PLUS_ASSIGN : PlusAssign ; +PLUS : Plus ; +OR : Pipe ; +DOLLAR : Dollar ; +RANGE : Range ; +DOT : Dot ; +AT : At ; +POUND : Pound ; +NOT : Tilde ; + + +// ------------------------- +// Identifiers - allows unicode rule/token names + +ID : Id + ; + + +// ------------------------- +// Whitespace + +WS : Ws+ -> channel(OFF_CHANNEL) ; + + +// ------------------------- +// Illegal Characters +// +// This is an illegal character trap which is always the last rule in the +// lexer specification. It matches a single character of any value and being +// the last rule in the file will match when no other rule knows what to do +// about the character. It is reported as an error but is not passed on to the +// parser. This means that the parser to deal with the gramamr file anyway +// but we will not try to analyse or code generate from a file with lexical +// errors. +// +// Comment this rule out to allow the error to be propagated to the parser + +ERRCHAR + : . 
-> channel(HIDDEN) + ; + + +// ====================================================== +// Lexer modes + +// ------------------------- +// Arguments + +mode Argument; // E.g., [int x, List a[]] + + NESTED_ARGUMENT : LBrack -> type(ARGUMENT_CONTENT), pushMode(Argument) ; + + ARGUMENT_ESCAPE : EscAny -> type(ARGUMENT_CONTENT) ; + + ARGUMENT_STRING_LITERAL : DQuoteLiteral -> type(ARGUMENT_CONTENT) ; + ARGUMENT_CHAR_LITERAL : SQuoteLiteral -> type(ARGUMENT_CONTENT) ; + + END_ARGUMENT : RBrack { self.handleEndArgument() } ; + + // added this to return non-EOF token type here. EOF does something weird + UNTERMINATED_ARGUMENT : EOF -> popMode ; + + ARGUMENT_CONTENT : . ; + + +// ------------------------- +// Actions +// +// Many language targets use {} as block delimiters and so we +// must recursively match {} delimited blocks to balance the +// braces. Additionally, we must make some assumptions about +// literal string representation in the target language. We assume +// that they are delimited by ' or " and so consume these +// in their own alts so as not to inadvertantly match {}. + +mode Action; + + NESTED_ACTION : LBrace -> type(ACTION_CONTENT), pushMode(Action) ; + + ACTION_ESCAPE : EscAny -> type(ACTION_CONTENT) ; + + ACTION_STRING_LITERAL : DQuoteLiteral -> type(ACTION_CONTENT) ; + ACTION_CHAR_LITERAL : SQuoteLiteral -> type(ACTION_CONTENT) ; + + ACTION_DOC_COMMENT : DocComment -> type(ACTION_CONTENT) ; + ACTION_BLOCK_COMMENT : BlockComment -> type(ACTION_CONTENT) ; + ACTION_LINE_COMMENT : LineComment -> type(ACTION_CONTENT) ; + + END_ACTION : RBrace { self.handleEndAction() } ; + + UNTERMINATED_ACTION : EOF -> popMode ; + + ACTION_CONTENT : . 
; + + +// ------------------------- + +mode Options; + + OPT_DOC_COMMENT : DocComment -> type(DOC_COMMENT), channel(OFF_CHANNEL) ; + OPT_BLOCK_COMMENT : BlockComment -> type(BLOCK_COMMENT), channel(OFF_CHANNEL) ; + OPT_LINE_COMMENT : LineComment -> type(LINE_COMMENT), channel(OFF_CHANNEL) ; + + OPT_LBRACE : LBrace -> type(LBRACE) ; + OPT_RBRACE : RBrace -> type(RBRACE), popMode ; + + OPT_ID : Id -> type(ID) ; + OPT_DOT : Dot -> type(DOT) ; + OPT_ASSIGN : Equal -> type(ASSIGN) ; + OPT_STRING_LITERAL : SQuoteLiteral -> type(STRING_LITERAL) ; + OPT_INT : Int -> type(INT) ; + OPT_STAR : Star -> type(STAR) ; + OPT_SEMI : Semi -> type(SEMI) ; + + OPT_WS : Ws+ -> type(WS), channel(OFF_CHANNEL) ; + + +// ------------------------- + +mode Tokens; + + TOK_DOC_COMMENT : DocComment -> type(DOC_COMMENT), channel(OFF_CHANNEL) ; + TOK_BLOCK_COMMENT : BlockComment -> type(BLOCK_COMMENT), channel(OFF_CHANNEL) ; + TOK_LINE_COMMENT : LineComment -> type(LINE_COMMENT), channel(OFF_CHANNEL) ; + + TOK_LBRACE : LBrace -> type(LBRACE) ; + TOK_RBRACE : RBrace -> type(RBRACE), popMode ; + + TOK_ID : Id -> type(ID) ; + TOK_DOT : Dot -> type(DOT) ; + TOK_COMMA : Comma -> type(COMMA) ; + + TOK_WS : Ws+ -> type(WS), channel(OFF_CHANNEL) ; + + +// ------------------------- + +mode Channels; // currently same as Tokens mode; distinguished by keyword + + CHN_DOC_COMMENT : DocComment -> type(DOC_COMMENT), channel(OFF_CHANNEL) ; + CHN_BLOCK_COMMENT : BlockComment -> type(BLOCK_COMMENT), channel(OFF_CHANNEL) ; + CHN_LINE_COMMENT : LineComment -> type(LINE_COMMENT), channel(OFF_CHANNEL) ; + + CHN_LBRACE : LBrace -> type(LBRACE) ; + CHN_RBRACE : RBrace -> type(RBRACE), popMode ; + + CHN_ID : Id -> type(ID) ; + CHN_DOT : Dot -> type(DOT) ; + CHN_COMMA : Comma -> type(COMMA) ; + + CHN_WS : Ws+ -> type(WS), channel(OFF_CHANNEL) ; + + +// ------------------------- + +mode LexerCharSet; + + LEXER_CHAR_SET_BODY + : ( ~[\]\\] + | EscAny + )+ -> more + ; + + LEXER_CHAR_SET + : RBrack -> popMode + ; + + 
UNTERMINATED_CHAR_SET + : EOF -> popMode + ; + + +// ------------------------------------------------------------------------------ +// Grammar specific Keywords, Punctuation, etc. + +fragment Id : NameStartChar NameChar* ; + diff --git a/tools/grammar-analysis/grammar/ANTLRv4Lexer.tokens b/tools/grammar-analysis/grammar/ANTLRv4Lexer.tokens new file mode 100644 index 00000000..1bf330e7 --- /dev/null +++ b/tools/grammar-analysis/grammar/ANTLRv4Lexer.tokens @@ -0,0 +1,78 @@ +TOKEN_REF=1 +RULE_REF=2 +LEXER_CHAR_SET=3 +DOC_COMMENT=4 +BLOCK_COMMENT=5 +LINE_COMMENT=6 +INT=7 +STRING_LITERAL=8 +UNTERMINATED_STRING_LITERAL=9 +BEGIN_ARGUMENT=10 +BEGIN_ACTION=11 +OPTIONS=12 +TOKENS=13 +CHANNELS=14 +IMPORT=15 +FRAGMENT=16 +LEXER=17 +PARSER=18 +GRAMMAR=19 +PROTECTED=20 +PUBLIC=21 +PRIVATE=22 +RETURNS=23 +LOCALS=24 +THROWS=25 +CATCH=26 +FINALLY=27 +MODE=28 +COLON=29 +COLONCOLON=30 +COMMA=31 +SEMI=32 +LPAREN=33 +RPAREN=34 +LBRACE=35 +RBRACE=36 +RARROW=37 +LT=38 +GT=39 +ASSIGN=40 +QUESTION=41 +STAR=42 +PLUS_ASSIGN=43 +PLUS=44 +OR=45 +DOLLAR=46 +RANGE=47 +DOT=48 +AT=49 +POUND=50 +NOT=51 +ID=52 +WS=53 +ERRCHAR=54 +END_ARGUMENT=55 +UNTERMINATED_ARGUMENT=56 +ARGUMENT_CONTENT=57 +END_ACTION=58 +UNTERMINATED_ACTION=59 +ACTION_CONTENT=60 +UNTERMINATED_CHAR_SET=61 +'options'=12 +'tokens'=13 +'channels'=14 +'import'=15 +'fragment'=16 +'lexer'=17 +'parser'=18 +'grammar'=19 +'protected'=20 +'public'=21 +'private'=22 +'returns'=23 +'locals'=24 +'throws'=25 +'catch'=26 +'finally'=27 +'mode'=28 diff --git a/tools/grammar-analysis/grammar/ANTLRv4Parser.g4 b/tools/grammar-analysis/grammar/ANTLRv4Parser.g4 new file mode 100644 index 00000000..1dd5d6c2 --- /dev/null +++ b/tools/grammar-analysis/grammar/ANTLRv4Parser.g4 @@ -0,0 +1,379 @@ +/* + * [The "BSD license"] + * Copyright (c) 2012-2014 Terence Parr + * Copyright (c) 2012-2014 Sam Harwell + * Copyright (c) 2015 Gerald Rosenberg + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ +/* A grammar for ANTLR v4 written in ANTLR v4. + * + * Modified 2015.06.16 gbr + * -- update for compatibility with Antlr v4.5 + * -- add mode for channels + * -- moved members to LexerAdaptor + * -- move fragments to imports + */ + +parser grammar ANTLRv4Parser; + +options + { tokenVocab = ANTLRv4Lexer; } + +// The main entry point for parsing a v4 grammar. 
+grammarSpec + : DOC_COMMENT* grammarType identifier SEMI prequelConstruct* rules modeSpec* EOF + ; + +grammarType + : (LEXER GRAMMAR | PARSER GRAMMAR | GRAMMAR) + ; + +// This is the list of all constructs that can be declared before +// the set of rules that compose the grammar, and is invoked 0..n +// times by the grammarPrequel rule. +prequelConstruct + : optionsSpec + | delegateGrammars + | tokensSpec + | channelsSpec + | action + ; + +// ------------ +// Options - things that affect analysis and/or code generation +optionsSpec + : OPTIONS LBRACE (option SEMI)* RBRACE + ; + +option + : identifier ASSIGN optionValue + ; + +optionValue + : identifier (DOT identifier)* + | STRING_LITERAL + | actionBlock + | INT + ; + +// ------------ +// Delegates +delegateGrammars + : IMPORT delegateGrammar (COMMA delegateGrammar)* SEMI + ; + +delegateGrammar + : identifier ASSIGN identifier + | identifier + ; + +// ------------ +// Tokens & Channels +tokensSpec + : TOKENS LBRACE idList? RBRACE + ; + +channelsSpec + : CHANNELS LBRACE idList? RBRACE + ; + +idList + : identifier (COMMA identifier)* COMMA? + ; + +// Match stuff like @parser::members {int i;} +action + : AT (actionScopeName COLONCOLON)? identifier actionBlock + ; + +// Scope names could collide with keywords; allow them as ids for action scopes +actionScopeName + : identifier + | LEXER + | PARSER + ; + +actionBlock + : BEGIN_ACTION ACTION_CONTENT* END_ACTION + ; + +argActionBlock + : BEGIN_ARGUMENT ARGUMENT_CONTENT* END_ARGUMENT + ; + +modeSpec + : MODE identifier SEMI lexerRuleSpec* + ; + +rules + : ruleSpec* + ; + +ruleSpec + : parserRuleSpec + | lexerRuleSpec + ; + +parserRuleSpec + : DOC_COMMENT* ruleModifiers? RULE_REF argActionBlock? ruleReturns? throwsSpec? localsSpec? rulePrequel* COLON ruleBlock SEMI exceptionGroup + ; + +exceptionGroup + : exceptionHandler* finallyClause? 
+ ; + +exceptionHandler + : CATCH argActionBlock actionBlock + ; + +finallyClause + : FINALLY actionBlock + ; + +rulePrequel + : optionsSpec + | ruleAction + ; + +ruleReturns + : RETURNS argActionBlock + ; + +// -------------- +// Exception spec +throwsSpec + : THROWS identifier (COMMA identifier)* + ; + +localsSpec + : LOCALS argActionBlock + ; + +/** Match stuff like @init {int i;} */ +ruleAction + : AT identifier actionBlock + ; + +ruleModifiers + : ruleModifier + + ; + +// An individual access modifier for a rule. The 'fragment' modifier +// is an internal indication for lexer rules that they do not match +// from the input but are like subroutines for other lexer rules to +// reuse for certain lexical patterns. The other modifiers are passed +// to the code generation templates and may be ignored by the template +// if they are of no use in that language. +ruleModifier + : PUBLIC + | PRIVATE + | PROTECTED + | FRAGMENT + ; + +ruleBlock + : ruleAltList + ; + +ruleAltList + : labeledAlt (OR labeledAlt)* + ; + +labeledAlt + : alternative (POUND identifier)? + ; + +// -------------------- +// Lexer rules +lexerRuleSpec + : DOC_COMMENT* FRAGMENT? TOKEN_REF COLON lexerRuleBlock SEMI + ; + +lexerRuleBlock + : lexerAltList + ; + +lexerAltList + : lexerAlt (OR lexerAlt)* + ; + +lexerAlt + : lexerElements lexerCommands? + | + // explicitly allow empty alts + ; + +lexerElements + : lexerElement + + ; + +lexerElement + : labeledLexerElement ebnfSuffix? + | lexerAtom ebnfSuffix? + | lexerBlock ebnfSuffix? + | actionBlock QUESTION? 
+ ; + +// but preds can be anywhere +labeledLexerElement + : identifier (ASSIGN | PLUS_ASSIGN) (lexerAtom | block) + ; + +lexerBlock + : LPAREN lexerAltList RPAREN + ; + +// E.g., channel(HIDDEN), skip, more, mode(INSIDE), push(INSIDE), pop +lexerCommands + : RARROW lexerCommand (COMMA lexerCommand)* + ; + +lexerCommand + : lexerCommandName LPAREN lexerCommandExpr RPAREN + | lexerCommandName + ; + +lexerCommandName + : identifier + | MODE + ; + +lexerCommandExpr + : identifier + | INT + ; + +// -------------------- +// Rule Alts +altList + : alternative (OR alternative)* + ; + +alternative + : elementOptions? element + + | + // explicitly allow empty alts + ; + +element + : labeledElement (ebnfSuffix |) + | atom (ebnfSuffix |) + | ebnf + | actionBlock QUESTION? + ; + +labeledElement + : identifier (ASSIGN | PLUS_ASSIGN) (atom | block) + ; + +// -------------------- +// EBNF and blocks +ebnf + : block blockSuffix? + ; + +blockSuffix + : ebnfSuffix + ; + +ebnfSuffix + : QUESTION QUESTION? + | STAR QUESTION? + | PLUS QUESTION? + ; + +lexerAtom + : characterRange + | terminal + | notSet + | LEXER_CHAR_SET + | DOT elementOptions? + ; + +atom + : characterRange + | terminal + | ruleref + | notSet + | DOT elementOptions? + ; + +// -------------------- +// Inverted element set +notSet + : NOT setElement + | NOT blockSet + ; + +blockSet + : LPAREN setElement (OR setElement)* RPAREN + ; + +setElement + : TOKEN_REF elementOptions? + | STRING_LITERAL elementOptions? + | characterRange + | LEXER_CHAR_SET + ; + +// ------------- +// Grammar Block +block + : LPAREN (optionsSpec? ruleAction* COLON)? altList RPAREN + ; + +// ---------------- +// Parser rule ref +ruleref + : RULE_REF argActionBlock? elementOptions? + ; + +// --------------- +// Character Range +characterRange + : STRING_LITERAL RANGE STRING_LITERAL + ; + +terminal + : TOKEN_REF elementOptions? + | STRING_LITERAL elementOptions? 
+ ; + +// Terminals may be adorned with certain options when +// reference in the grammar: TOK<,,,> +elementOptions + : LT elementOption (COMMA elementOption)* GT + ; + +elementOption + : identifier + | identifier ASSIGN (identifier | STRING_LITERAL) + ; + +identifier + : RULE_REF + | TOKEN_REF + ; diff --git a/tools/grammar-analysis/grammar/ANTLRv4Parser.tokens b/tools/grammar-analysis/grammar/ANTLRv4Parser.tokens new file mode 100644 index 00000000..1bf330e7 --- /dev/null +++ b/tools/grammar-analysis/grammar/ANTLRv4Parser.tokens @@ -0,0 +1,78 @@ +TOKEN_REF=1 +RULE_REF=2 +LEXER_CHAR_SET=3 +DOC_COMMENT=4 +BLOCK_COMMENT=5 +LINE_COMMENT=6 +INT=7 +STRING_LITERAL=8 +UNTERMINATED_STRING_LITERAL=9 +BEGIN_ARGUMENT=10 +BEGIN_ACTION=11 +OPTIONS=12 +TOKENS=13 +CHANNELS=14 +IMPORT=15 +FRAGMENT=16 +LEXER=17 +PARSER=18 +GRAMMAR=19 +PROTECTED=20 +PUBLIC=21 +PRIVATE=22 +RETURNS=23 +LOCALS=24 +THROWS=25 +CATCH=26 +FINALLY=27 +MODE=28 +COLON=29 +COLONCOLON=30 +COMMA=31 +SEMI=32 +LPAREN=33 +RPAREN=34 +LBRACE=35 +RBRACE=36 +RARROW=37 +LT=38 +GT=39 +ASSIGN=40 +QUESTION=41 +STAR=42 +PLUS_ASSIGN=43 +PLUS=44 +OR=45 +DOLLAR=46 +RANGE=47 +DOT=48 +AT=49 +POUND=50 +NOT=51 +ID=52 +WS=53 +ERRCHAR=54 +END_ARGUMENT=55 +UNTERMINATED_ARGUMENT=56 +ARGUMENT_CONTENT=57 +END_ACTION=58 +UNTERMINATED_ACTION=59 +ACTION_CONTENT=60 +UNTERMINATED_CHAR_SET=61 +'options'=12 +'tokens'=13 +'channels'=14 +'import'=15 +'fragment'=16 +'lexer'=17 +'parser'=18 +'grammar'=19 +'protected'=20 +'public'=21 +'private'=22 +'returns'=23 +'locals'=24 +'throws'=25 +'catch'=26 +'finally'=27 +'mode'=28 diff --git a/tools/grammar-analysis/grammar/LexBasic.g4 b/tools/grammar-analysis/grammar/LexBasic.g4 new file mode 100644 index 00000000..043b799b --- /dev/null +++ b/tools/grammar-analysis/grammar/LexBasic.g4 @@ -0,0 +1,302 @@ +/* + * [The "BSD license"] + * Copyright (c) 2014-2015 Gerald Rosenberg + * All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. The name of the author may not be used to endorse or promote products + * derived from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES + * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, + * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT + * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +/** + * A generally reusable set of fragments for import in to Lexer grammars. + * + * Modified 2015.06.16 gbr - + * -- generalized for inclusion into the ANTLRv4 grammar distribution + * + */ +lexer grammar LexBasic; + +// ====================================================== +// Lexer fragments +// +// ----------------------------------- +// Whitespace & Comments + +fragment Ws + : Hws | Vws + ; + + +fragment Hws + : [ \t] + ; + + +fragment Vws + : [\r\n\f] + ; + + +fragment BlockComment + : '/*' .*? ('*/' | EOF) + ; + + +fragment DocComment + : '/**' .*? 
('*/' | EOF) + ; + +fragment LineComment + : '//' ~ [\r\n]* + ; + +// ----------------------------------- +// Escapes +// Any kind of escaped character that we can embed within ANTLR literal strings. + +fragment EscSeq + : Esc ([btnfr"'\\] | UnicodeEsc | . | EOF) + ; + + +fragment EscAny + : Esc . + ; + + +fragment UnicodeEsc + : 'u' (HexDigit (HexDigit (HexDigit HexDigit?)?)?)? + ; + +// ----------------------------------- +// Numerals + +fragment DecimalNumeral + : '0' | [1-9] DecDigit* + ; + +// ----------------------------------- +// Digits + +fragment HexDigit + : [0-9a-fA-F] + ; + + +fragment DecDigit + : [0-9] + ; + +// ----------------------------------- +// Literals + +fragment BoolLiteral + : 'true' | 'false' + ; + + +fragment CharLiteral + : SQuote (EscSeq | ~ ['\r\n\\]) SQuote + ; + + +fragment SQuoteLiteral + : SQuote (EscSeq | ~ ['\r\n\\])* SQuote + ; + + +fragment DQuoteLiteral + : DQuote (EscSeq | ~ ["\r\n\\])* DQuote + ; + + +fragment USQuoteLiteral + : SQuote (EscSeq | ~ ['\r\n\\])* + ; + +// ----------------------------------- +// Character ranges + +fragment NameChar + : NameStartChar | '0' .. '9' | Underscore | '\u00B7' | '\u0300' .. '\u036F' | '\u203F' .. '\u2040' + ; + + +fragment NameStartChar + : 'A' .. 'Z' | 'a' .. 'z' | '\u00C0' .. '\u00D6' | '\u00D8' .. '\u00F6' | '\u00F8' .. '\u02FF' | '\u0370' .. '\u037D' | '\u037F' .. '\u1FFF' | '\u200C' .. '\u200D' | '\u2070' .. '\u218F' | '\u2C00' .. '\u2FEF' | '\u3001' .. '\uD7FF' | '\uF900' .. '\uFDCF' | '\uFDF0' .. 
'\uFFFD' + ; + +// ignores | ['\u10000-'\uEFFFF] ; +// ----------------------------------- +// Types + +fragment Int + : 'int' + ; + +// ----------------------------------- +// Symbols + +fragment Esc + : '\\' + ; + + +fragment Colon + : ':' + ; + + +fragment DColon + : '::' + ; + + +fragment SQuote + : '\'' + ; + + +fragment DQuote + : '"' + ; + + +fragment LParen + : '(' + ; + + +fragment RParen + : ')' + ; + + +fragment LBrace + : '{' + ; + + +fragment RBrace + : '}' + ; + + +fragment LBrack + : '[' + ; + + +fragment RBrack + : ']' + ; + + +fragment RArrow + : '->' + ; + + +fragment Lt + : '<' + ; + + +fragment Gt + : '>' + ; + + +fragment Equal + : '=' + ; + + +fragment Question + : '?' + ; + + +fragment Star + : '*' + ; + + +fragment Plus + : '+' + ; + + +fragment PlusAssign + : '+=' + ; + + +fragment Underscore + : '_' + ; + + +fragment Pipe + : '|' + ; + + +fragment Dollar + : '$' + ; + + +fragment Comma + : ',' + ; + + +fragment Semi + : ';' + ; + + +fragment Dot + : '.' + ; + + +fragment Range + : '..' 
+ ; + + +fragment At + : '@' + ; + + +fragment Pound + : '#' + ; + + +fragment Tilde + : '~' + ; diff --git a/tools/grammar-analysis/grammar/LexBasic.tokens b/tools/grammar-analysis/grammar/LexBasic.tokens new file mode 100644 index 00000000..e69de29b diff --git a/tools/grammar-analysis/grammar/three.g4 b/tools/grammar-analysis/grammar/three.g4 new file mode 100644 index 00000000..c5779c08 --- /dev/null +++ b/tools/grammar-analysis/grammar/three.g4 @@ -0,0 +1,3 @@ +/**/ +grammar three; +/* */ diff --git a/tools/grammar-analysis/pom.xml b/tools/grammar-analysis/pom.xml new file mode 100644 index 00000000..92e969dc --- /dev/null +++ b/tools/grammar-analysis/pom.xml @@ -0,0 +1,62 @@ + + 4.0.0 + antlr4 + jar + ANTLR4 grammar + + com.antlr.grammarsv4 + grammarsv4 + 1.0-SNAPSHOT + ../pom.xml + + + + + org.antlr + antlr4-maven-plugin + ${antlr.version} + + ${basedir} + ANTLRv4Parser.g4 + + ANTLRv4Lexer.g4 + ANTLRv4Parser.g4 + + + -package + org.antlr.parser.antlr4 + + ${project.build.directory}/generated-sources/antlr4/org/antlr/parser/antlr4 + true + true + + + + + antlr4 + + + + + + org.antlr + antlr4test-maven-plugin + + true + true + grammarSpec + ANTLRv4 + org.antlr.parser.antlr4 + examples/ + + + + + test + + + + + + +