Commit b0dadb4c authored by Maxime Perrotin

Initial commit - DMT with setup.py install script

(C) Semantix Information Technologies.
Semantix Information Technologies is licensing the code of the
Data Modelling Tools (DMT) in the following dual-license mode:
Commercial Developer License:
The DMT Commercial Developer License is the suggested version
to use for the development of proprietary and/or commercial software.
This version is for developers/companies who do not want to comply
with the terms of the GNU Lesser General Public License version 2.1.
GNU LGPL v. 2.1:
This version of DMT is the one to use for the development of
applications, when you are willing to comply with the terms of the
GNU Lesser General Public License version 2.1.
Note that in both cases, there are no charges (royalties) for the
generated code.
To purchase a commercial developer license (covering the entire
DMT toolchain), please contact Semantix at: dmt@semantix.gr
TASTE Data Modelling Technologies
- commonPy - Base API for parsing ASN.1
- asn2aadlPlus - Convert ASN.1 modules to AADL
- asn2dataModel - A mappers: convert ASN.1 types for use in "any" language (Simulink, C, Ada, Python...)
- aadl2glueC - B mappers: generate runtime code that converts data from/to ASN.1 in various languages
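
For example, parsing a set of ASN.1 modules through the base API is a
single call (a minimal sketch: the module and function names below are
those of commonPy's asnParser as found in DMT releases and may differ
in this initial commit; the input file name is illustrative):

    from commonPy import asnParser
    asnParser.ParseAsnFileList(["DataTypes.asn"])    # parse the ASN.1 modules
    for typeName, node in asnParser.g_names.items():
        print typeName, node                         # walk the resulting AST nodes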
### $ANTLR 2.7.7 (20120126): "aadl.g" -> "AadlLexer.py"$
### import antlr and other modules ..
import sys
import antlr
### Pre-Python-2.3 compatibility: define the True/False built-ins on
### interpreters that lack them.
version = sys.version.split()[0]
if version < '2.2.1':
    False = 0
if version < '2.3':
    True = not False
### header action >>>
### header action <<<
### preamble action >>>
### preamble action <<<
### >>>The Literals<<<
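### (Each lowercase AADL keyword maps to the numeric token type that is
### also listed symbolically under "The Known Token Types" below.)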
literals = {}
literals[u"type"] = 32
literals[u"inverse"] = 92
literals[u"constant"] = 70
literals[u"connections"] = 58
literals[u"public"] = 7
literals[u"list"] = 69
literals[u"initial"] = 87
literals[u"applies"] = 62
literals[u"end"] = 5
literals[u"aadlboolean"] = 39
literals[u"flows"] = 94
literals[u"memory"] = 20
literals[u"aadlstring"] = 40
literals[u"flow"] = 67
literals[u"system"] = 16
literals[u"implementation"] = 24
literals[u"to"] = 28
literals[u"and"] = 80
literals[u"not"] = 99
literals[u"package"] = 4
literals[u"inherit"] = 61
literals[u"aadlreal"] = 48
literals[u"source"] = 95
literals[u"reference"] = 57
literals[u"provides"] = 29
literals[u"server"] = 59
literals[u"sink"] = 96
literals[u"event"] = 66
literals[u"range"] = 54
literals[u"enumeration"] = 41
literals[u"calls"] = 85
literals[u"out"] = 91
literals[u"set"] = 37
literals[u"parameter"] = 68
literals[u"of"] = 55
literals[u"is"] = 38
literals[u"aadlinteger"] = 49
literals[u"or"] = 79
literals[u"access"] = 60
literals[u"none"] = 11
literals[u"features"] = 25
literals[u"data"] = 18
literals[u"all"] = 63
literals[u"thread"] = 12
literals[u"path"] = 97
literals[u"properties"] = 72
literals[u"units"] = 45
literals[u"bus"] = 21
literals[u"binding"] = 78
literals[u"extends"] = 13
literals[u"private"] = 8
literals[u"port"] = 65
literals[u"requires"] = 30
literals[u"refines"] = 31
literals[u"false"] = 82
literals[u"processor"] = 19
literals[u"device"] = 22
literals[u"property"] = 36
literals[u"annex"] = 34
literals[u"classifier"] = 56
literals[u"transitions"] = 100
literals[u"process"] = 15
literals[u"value"] = 76
literals[u"modes"] = 86
literals[u"in"] = 77
literals[u"delta"] = 71
literals[u"mode"] = 64
literals[u"true"] = 81
literals[u"group"] = 14
literals[u"refined"] = 27
literals[u"subprogram"] = 17
literals[u"subcomponents"] = 33
### import antlr.Token
from antlr import Token
### >>>The Known Token Types <<<
SKIP = antlr.SKIP
INVALID_TYPE = antlr.INVALID_TYPE
EOF_TYPE = antlr.EOF_TYPE
EOF = antlr.EOF
NULL_TREE_LOOKAHEAD = antlr.NULL_TREE_LOOKAHEAD
MIN_USER_TYPE = antlr.MIN_USER_TYPE
PACKAGE = 4
END = 5
SEMI = 6
PUBLIC = 7
PRIVATE = 8
IDENT = 9
DOUBLECOLON = 10
NONE = 11
THREAD = 12
EXTENDS = 13
GROUP = 14
PROCESS = 15
SYSTEM = 16
SUBPROGRAM = 17
DATA = 18
PROCESSOR = 19
MEMORY = 20
BUS = 21
DEVICE = 22
DOT = 23
IMPL = 24
FEATURES = 25
COLON = 26
REFINED = 27
TO = 28
PROVIDES = 29
REQUIRES = 30
REFINES = 31
TYPE = 32
SUBCOMPONENTS = 33
ANNEX = 34
ANNEX_TEXT = 35
PROPERTY = 36
SET = 37
IS = 38
BOOLEAN = 39
STRING = 40
ENUMERATION = 41
LPAREN = 42
COMMA = 43
RPAREN = 44
UNITS = 45
ASSIGN = 46
STAR = 47
REAL = 48
INTEGER = 49
DOTDOT = 50
PLUS = 51
MINUS = 52
NUMERIC_LIT = 53
RANGE = 54
OF = 55
CLASSIFIER = 56
REFERENCE = 57
CONNECTIONS = 58
SERVER = 59
ACCESS = 60
INHERIT = 61
APPLIES = 62
ALL = 63
MODE = 64
PORT = 65
EVENT = 66
FLOW = 67
PARAMETER = 68
LIST = 69
CONSTANT = 70
DELTA = 71
PROPERTIES = 72
LCURLY = 73
RCURLY = 74
ASSIGNPLUS = 75
VALUE = 76
IN = 77
BINDING = 78
OR = 79
AND = 80
TRUE = 81
FALSE = 82
NOT = 83
STRING_LITERAL = 84
CALLS = 85
MODES = 86
INITIAL = 87
LTRANS = 88
RTRANS = 89
ARROW = 90
OUT = 91
INVERSE = 92
DARROW = 93
FLOWS = 94
SOURCE = 95
SINK = 96
PATH = 97
AADLSPEC = 98
NOTT = 99
TRANSITIONS = 100
HASH = 101
DIGIT = 102
EXPONENT = 103
INT_EXPONENT = 104
EXTENDED_DIGIT = 105
BASED_INTEGER = 106
BASE = 107
ESC = 108
HEX_DIGIT = 109
WS = 110
SL_COMMENT = 111
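### (Types below MIN_USER_TYPE are reserved by the antlr runtime; the
### user-defined types above mirror the numbers in the literals table.)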
class Lexer(antlr.CharScanner) :
    ### user action >>>
    ### user action <<<
    def __init__(self, *argv, **kwargs) :
        antlr.CharScanner.__init__(self, *argv, **kwargs)
        self.caseSensitiveLiterals = False
        self.setCaseSensitive(False)
        self.literals = literals
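    ### AADL is case-insensitive: both character matching and the keyword
    ### (literal) lookup done by testForLiteral() therefore ignore case.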
    def nextToken(self):
        while True:
            try: ### try again ..
                while True:
                    _token = None
                    _ttype = INVALID_TYPE
                    self.resetText()
                    try: ## for char stream error handling
                        try: ##for lexical error handling
                            la1 = self.LA(1)
                            if False:
                                pass
                            elif la1 and la1 in u'(':
                                pass
                                self.mLPAREN(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u')':
                                pass
                                self.mRPAREN(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'}':
                                pass
                                self.mRCURLY(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'*':
                                pass
                                self.mSTAR(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u';':
                                pass
                                self.mSEMI(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u',':
                                pass
                                self.mCOMMA(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'=':
                                pass
                                self.mASSIGN(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u']':
                                pass
                                self.mRTRANS(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'#':
                                pass
                                self.mHASH(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'abcdefghijklmnopqrstuvwxyz':
                                pass
                                self.mIDENT(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'"':
                                pass
                                self.mSTRING_LITERAL(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'0123456789':
                                pass
                                self.mNUMERIC_LIT(True)
                                theRetToken = self._returnToken
                            elif la1 and la1 in u'\t\n\r ':
                                pass
                                self.mWS(True)
                                theRetToken = self._returnToken
                            else:
                                if (self.LA(1)==u'-') and (self.LA(2)==u'>') and (self.LA(3)==u'>'):
                                    pass
                                    self.mDARROW(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'.') and (self.LA(2)==u'.'):
                                    pass
                                    self.mDOTDOT(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'+') and (self.LA(2)==u'='):
                                    pass
                                    self.mASSIGNPLUS(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u':') and (self.LA(2)==u':'):
                                    pass
                                    self.mDOUBLECOLON(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'-') and (self.LA(2)==u'['):
                                    pass
                                    self.mLTRANS(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'-') and (self.LA(2)==u'>') and (True):
                                    pass
                                    self.mARROW(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'{') and (self.LA(2)==u'*'):
                                    pass
                                    self.mANNEX_TEXT(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'-') and (self.LA(2)==u'-'):
                                    pass
                                    self.mSL_COMMENT(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'{') and (True):
                                    pass
                                    self.mLCURLY(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u':') and (True):
                                    pass
                                    self.mCOLON(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'+') and (True):
                                    pass
                                    self.mPLUS(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'-') and (True):
                                    pass
                                    self.mMINUS(True)
                                    theRetToken = self._returnToken
                                elif (self.LA(1)==u'.') and (True):
                                    pass
                                    self.mDOT(True)
                                    theRetToken = self._returnToken
                                else:
                                    self.default(self.LA(1))
                            if not self._returnToken:
                                raise antlr.TryAgain ### found SKIP token
                            ### option { testLiterals=true }
                            self.testForLiteral(self._returnToken)
                            ### return token to caller
                            return self._returnToken
                        ### handle lexical errors ....
                        except antlr.RecognitionException, e:
                            self.reportError(e)
                            self.consume()
                    ### handle char stream errors ...
                    except antlr.CharStreamException, cse:
                        if isinstance(cse, antlr.CharStreamIOException):
                            raise antlr.TokenStreamIOException(cse.io)
                        else:
                            raise antlr.TokenStreamException(str(cse))
            except antlr.TryAgain:
                pass
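    ### Each mXXX method below matches the characters of one token kind and
    ### stores the result via set_return_token(); nextToken() dispatches to
    ### them on one to three characters of lookahead (self.LA(n)).
    ###
    ### A hedged usage sketch (assumes the stock antlr 2.7.7 Python runtime;
    ### the input file name is illustrative):
    ###     import antlr, AadlLexer
    ###     lexer = AadlLexer.Lexer(open("example.aadl"))
    ###     tok = lexer.nextToken()
    ###     while tok.getType() != antlr.EOF_TYPE:
    ###         print tok.getText(), tok.getType()
    ###         tok = lexer.nextToken()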
    def mLPAREN(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = LPAREN
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match('(')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mRPAREN(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = RPAREN
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match(')')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mLCURLY(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = LCURLY
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match('{')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mRCURLY(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = RCURLY
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match('}')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mCOLON(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = COLON
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match(':')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mPLUS(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = PLUS
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match('+')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mMINUS(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = MINUS
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match('-')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mSTAR(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = STAR
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match('*')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mSEMI(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = SEMI
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match(';')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mCOMMA(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = COMMA
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match(',')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mDOT(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = DOT
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match('.')
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mDOTDOT(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = DOTDOT
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match("..")
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mASSIGN(self, _createToken):
        _ttype = 0
        _token = None
        _begin = self.text.length()
        _ttype = ASSIGN
        _saveIndex = 0
        try: ## for error handling
            pass
            self.match("=>")
        except antlr.RecognitionException, ex:
            self.reportError(ex)
            self.consume()
            self.consumeUntil(_tokenSet_0)
        self.set_return_token(_createToken, _token, _ttype, _begin)

    def mASSIGNPLUS(self, _createToken):
        _ttype = 0