Commit 12c53f5a authored by Thanassis Tsiodras's avatar Thanassis Tsiodras
Browse files

Statement coverage operational and back to 100% - QGen still disabled

parent 4c6e51dc
......@@ -2,3 +2,5 @@
*,cover
.coverage
tests-coverage/output
tests-coverage/smp2.asn
tests-coverage/datatypessimulink.cat
......@@ -224,6 +224,10 @@ types). This used to cover Dumpable C/Ada Types and OG headers.'''
def main():
sys.path.append(os.path.abspath(os.path.dirname(sys.argv[0])))
global underCoverageAnalysis
underCoverageAnalysis = os.environ.get('COVERAGE') == "1"
if underCoverageAnalysis:
sys.path.append(os.path.abspath(os.path.dirname(sys.argv[0]) + os.sep + ".."))
if sys.argv.count("-o") != 0:
idx = sys.argv.index("-o")
try:
......@@ -364,7 +368,10 @@ def main():
backendFilename = "." + modelingLanguage.lower() + "_B_mapper.py"
inform("Parsing %s...", backendFilename)
try:
backend = import_module(backendFilename[:-3], 'aadl2glueC')
if underCoverageAnalysis:
backend = import_module(backendFilename[1:-3])
else: # pragma: no cover
backend = import_module(backendFilename[:-3], 'aadl2glueC') # pragma: no cover
if backendFilename[:-3] not in loadedBackends:
loadedBackends[backendFilename[:-3]] = 1
if commonPy.configMT.verbose:
......@@ -466,10 +473,16 @@ def main():
def mappers(lang):
if lang.lower() in ["gui_pi", "gui_ri"]:
return [import_module(".python_B_mapper", "aadl2glueC"),
import_module(".pyside_B_mapper", "aadl2glueC")]
if underCoverageAnalysis:
return [import_module("python_B_mapper"), import_module("pyside_B_mapper")]
else: # pragma: no cover
return [import_module(".python_B_mapper", "aadl2glueC"),
import_module(".pyside_B_mapper", "aadl2glueC")] # pragma: no cover
elif lang.lower() == "vhdl": # pragma: no cover
return [import_module(".vhdl_B_mapper", "aadl2glueC")] # pragma: no cover
if underCoverageAnalysis:
return [import_module("vhdl_B_mapper")] # pragma: no cover
else: # pragma: no cover
return [import_module(".vhdl_B_mapper", "aadl2glueC")] # pragma: no cover
for si in [x for x in SystemsAndImplementations if x[2] is not None and x[2].lower() in ["gui_ri", "gui_pi", "vhdl"]]:
# We do, start the work
......
......@@ -64,6 +64,10 @@ def usage(argsToTools):
def main():
sys.path.append(os.path.abspath(os.path.dirname(sys.argv[0])))
global underCoverageAnalysis
underCoverageAnalysis = os.environ.get('COVERAGE') == "1"
if underCoverageAnalysis:
sys.path.append(os.path.abspath(os.path.dirname(sys.argv[0]) + os.sep + ".."))
sys.path.append('commonPy')
argsToTools = {
......@@ -162,7 +166,10 @@ def main():
backendFilename = "." + modelingLanguage.lower() + "_A_mapper.py"
inform("Parsing %s...", backendFilename)
try:
backend = import_module(backendFilename[:-3], 'asn2dataModel')
if underCoverageAnalysis:
backend = import_module(backendFilename[1:-3])
else: # pragma: no cover
backend = import_module(backendFilename[:-3], 'asn2dataModel') # pragma: no cover
if backendFilename[:-3] not in loadedBackends:
loadedBackends[backendFilename[:-3]] = 1
if commonPy.configMT.verbose:
......
import re
import sys
from lxml import etree
from commonPy.asnAST import AsnBool, AsnInt, AsnReal, \
AsnEnumerated, AsnOctetString, AsnSequenceOf, AsnSet, \
AsnSetOf, AsnSequence, AsnChoice, AsnMetaMember
# Level of verbosity
# (0 = quiet; raised via setVerbosity(); checked by info())
g_verboseLevel = 0
# colors (used when calling 'info')
# ANSI terminal escape sequences; info() suppresses the separating
# space after any argument that is one of these.
ESC = chr(27)
red = ESC+"[31m"
green = ESC+"[32m"
white = ESC+"[0m"
yellow = ESC+"[33m"
colors=[red, green, white, yellow]
# Lookup table for SMP2 types that map to AsnBasicNodes
class MagicSmp2SimpleTypesDict(dict):
    '''A dict keyed by SMP2 type URIs where lookups ignore the
    "/YYYY/MM/" date segment of the URI - e.g. a lookup for
    "http://www.esa.int/2005/10/Smp#Bool" matches the stored key
    "http://www.esa.int/Smp#Bool".'''

    @staticmethod
    def _stripDate(name):
        # 'http://www.esa.int/2005/10/Smp#Bool' -> 'http://www.esa.int/Smp#Bool'
        return re.sub(r'/\d{4}/\d{2}/', '/', name)
    #---------------------------------------------------------------------------
    def __getitem__(self, name):
        return super(MagicSmp2SimpleTypesDict, self).__getitem__(self._stripDate(name))
    #---------------------------------------------------------------------------
    def __contains__(self, name):
        return super(MagicSmp2SimpleTypesDict, self).__contains__(self._stripDate(name))
    #---------------------------------------------------------------------------
    def has_key(self, name):
        return super(MagicSmp2SimpleTypesDict, self).has_key(self._stripDate(name))
# Maps hardcoded SMP2 'xlink:href' type URIs to a triple:
# (asnAST node class to instantiate, low bound, high bound).
# A None bound means "no range constraint is emitted" (see the span
# checks in MapSMP2Type / ConvertCatalogueToASN_AST).
simpleTypesTable = MagicSmp2SimpleTypesDict({
    'http://www.esa.int/Smp#Bool': (AsnBool, None, None),
    'http://www.esa.int/Smp#Char8': (AsnInt, 0, 255),
    'http://www.esa.int/Smp#DateTime': (AsnOctetString, 30, 30),
    'http://www.esa.int/Smp#Duration': (AsnInt, 0, 2147483647),
    'http://www.esa.int/Smp#Int8': (AsnInt, -128, 127),
    'http://www.esa.int/Smp#Int16': (AsnInt, -32768, 32767),
    'http://www.esa.int/Smp#Int32': (AsnInt, -2147483648, 2147483647),
    'http://www.esa.int/Smp#Int64': (AsnInt, -9223372036854775808, 9223372036854775807),
    'http://www.esa.int/Smp#UInt8': (AsnInt, 0, 255),
    'http://www.esa.int/Smp#UInt16': (AsnInt, 0, 65535),
    'http://www.esa.int/Smp#UInt32': (AsnInt, 0, 4294967295),
    # NOTE(review): UInt64's upper bound is Int64's max, not 2**64-1 -
    # presumably capped to what ASN.1 tooling can represent; confirm.
    'http://www.esa.int/Smp#UInt64': (AsnInt, 0, 9223372036854775807),
    'http://www.esa.int/Smp#Float32': (AsnReal, -3.4E37, 3.4E37),
    'http://www.esa.int/Smp#Float64': (AsnReal, -1.8E307, 1.8E307)
})
def setVerbosity(level):
    '''Set the module-wide verbosity threshold consulted by info().'''
    global g_verboseLevel
    g_verboseLevel = level
def info(level, *args):
    '''Checks the 'level' argument against g_verboseLevel and then prints
    the rest of the args, one by one, separated by a space. It also
    has logic to deal with usage of one of the colors as arguments
    (in which case it avoids printing spurious spaces).
    '''
    if not args:
        panic("You called info without args")  # pragma: no cover
    if level<=g_verboseLevel:
        # Emit the args separated by single spaces - except that no
        # space is emitted right after a color escape sequence.
        for i in xrange(len(args)):
            if i !=0 and args[i-1] not in colors:
                sys.stdout.write(' ')
            sys.stdout.write(args[i])
        # Walk backwards past any trailing color args to the last real
        # argument; terminate the line if it lacks a newline, then stop.
        for i in xrange(len(args)-1, -1, -1):
            if args[i] in colors:
                continue
            if not args[i].endswith('\n'):
                sys.stdout.write('\n')
            return
def panic(x, coloredBanner=""):
    '''Notifies the user that something fatal happened and aborts. '''
    banner = yellow + coloredBanner + white
    info(0, banner + '\n' + x)
    sys.exit(1)
class DashUnderscoreAgnosticDict(dict):
    '''A dictionary that treats '_' and '-' in keys as equivalent:
    every key is canonicalized to its dash form before any access.'''

    def __setitem__(self, key, value):
        dict.__setitem__(self, key.replace('_', '-'), value)

    def __getitem__(self, key):
        return dict.__getitem__(self, key.replace('_', '-'))

    def __contains__(self, key):
        return dict.__contains__(self, key.replace('_', '-'))
class Attributes:
    '''Helper class, to ease access to XML attributes.
    It allows us to write code like this...
        a = Attributes(lxmlEtreeNode)
        whatever = a.href
        print a.title
    ...instead of this:
        whatever = lxmlEtreeNode.get('href', None)
        print a.get('title', None)
    '''
    def __init__(self, t):
        '''Argument t is an lxml Etree node.'''
        self._attrs = {}
        for key, value in t.items():
            # Drop any leading '{namespace}' qualifier from the name.
            brace = key.find('}')
            if brace != -1:
                key = key[brace+1:]
            self._attrs[key] = value

    def __getattr__(self, x):
        # Missing attributes resolve to None rather than raising.
        return self._attrs.get(x, None)
def Clean(fieldName):
    '''When mapping field names and type names from SMP2 to ASN.1,
    every character that is not ASCII-alphanumeric or a dash must
    become a dash; return the cleaned-up name.'''
    return re.sub(r'[^a-zA-Z0-9-]', '-', fieldName)
def MapSMP2Type(attrs, enumOptions, itemTypes, fields):
'''
Core mapping function. Works on the XML attributes of the lxml Etree node,
and returns a node from commonPy.asnAST.
'''
location = 'from %s, in line %s' % (attrs.base, attrs.sourceline)
info(2, "Mapping SMP2 type", location)
def getMaybe(cast, x):
try:
return cast(x)
except: # pragma: no cover
return None # pragma: no cover
dataDict = {"asnFilename": attrs.base, "lineno": attrs.sourceline}
if attrs.type == 'Types:Integer':
low = getMaybe(int, attrs.Minimum)
high = getMaybe(int, attrs.Maximum)
if low==0 and high==1:
# Pseudo-boolean from TASTE mapping, as per SpaceBel instructions
return AsnBool(**dataDict)
else:
# Normal integer
span = [low, high] if low is not None and high is not None else []
dataDict["range"] = span
return AsnInt(**dataDict)
elif attrs.type == 'Types:Float':
low = getMaybe(float, attrs.Minimum)
high = getMaybe(float, attrs.Maximum)
span = [low, high] if low is not None and high is not None else []
dataDict["range"] = span
return AsnReal(**dataDict)
elif attrs.type == 'Types:Enumeration':
dataDict["members"] = enumOptions
return AsnEnumerated(**dataDict)
elif attrs.type == 'Types:String':
high = getMaybe(int, attrs.Length)
span = [high, high] if high is not None else []
dataDict["range"] = span
return AsnOctetString(**dataDict)
elif attrs.type == 'Types:Array':
if itemTypes == []:
panic("Missing mandatory ItemType element", location) # pragma: no cover
itemTypeAttrs = Attributes(itemTypes[0])
arrSize = getMaybe(int, attrs.Size)
if not arrSize:
panic("Missing array 'Size' attribute", location) # pragma: no cover
dataDict["range"] = [arrSize, arrSize]
if itemTypeAttrs.href in [
'http://www.esa.int/2005/10/Smp#Char8',
'http://www.esa.int/2005/10/Smp#Int8',
'http://www.esa.int/2005/10/Smp#UInt8']:
return AsnOctetString(**dataDict)
else:
containedHref = itemTypeAttrs.href
if not containedHref:
panic("Missing reference to 'href' (file:%s, line:%d)" %
itemTypeAttrs.base, itemTypeAttrs.sourceline) # pragma: no cover
idxHash = containedHref.find('#')
if -1 != idxHash:
containedHref = containedHref[idxHash+1:]
if itemTypeAttrs.href in simpleTypesTable:
# Create the AsnBasicNode this child maps to.
cast, low, high = simpleTypesTable[itemTypeAttrs.href]
span=[low, high] if low is not None and high is not None else []
childDict = {
'asnFilename': itemTypes[0].base,
'lineno': itemTypes[0].sourceline
}
if span != []:
childDict['range'] = span
childNode = cast(**childDict)
dataDict['containedType'] = childNode
else:
# Store the 'Id' attribute - we will resolve this
# in the FixupOutOfOrderIdReferences function.
dataDict['containedType'] = containedHref
return AsnSequenceOf(**dataDict)
elif attrs.type == 'Types:Structure':
members = []
for field in fields:
try:
fieldName = field.get('Name')
if fieldName != 'choiceIdx':
fieldName = Clean(fieldName)
fieldName = fieldName[0].lower() + fieldName[1:]
try:
refTypeAttrs = Attributes(field.xpath("Type")[0])
except: # pragma: no cover
location = 'from %s, in line %s' % \
(field.base, field.sourceline) # pragma: no cover
panic("Missing Type child element", location) # pragma: no cover
refTypeHref = refTypeAttrs.href
idxHash = refTypeHref.find('#')
if -1 != idxHash:
refTypeHref = refTypeHref[idxHash+1:]
if refTypeAttrs.href in simpleTypesTable:
cast, low, high = simpleTypesTable[refTypeAttrs.href]
containedDict = {
'asnFilename': field.base,
'lineno': field.sourceline
}
span = [low, high] if low is not None and high is not None else []
if span != []:
containedDict['range']=[low, high]
basicNode = cast(**containedDict)
members.append((fieldName, basicNode))
else:
members.append((fieldName, AsnMetaMember(
asnFilename=field.base,
lineno=field.sourceline,
containedType=refTypeHref)))
else:
members.append((fieldName, 'dummy'))
except Exception, e: # pragma: no cover
panic(str(e) + '\nMake sure that:\n'
'1. The "Name" attribute exists\n'
'2. The "Type" child element, with attribute '
'"xlink:title" also exists.',
'In %s, line %d:' % (field.base, field.sourceline)) # pragma: no cover
if 0 == len(members):
panic("Empty SEQUENCE is not supported", location) # pragma: no cover
if members[0][0] == 'choiceIdx':
dataDict['members'] = members[1:]
return AsnChoice(**dataDict)
else:
dataDict['members'] = members
return AsnSequence(**dataDict)
panic("Failed to map... (%s)" % attrs.type, location) # pragma: no cover
def FixupOutOfOrderIdReferences(nodeTypename, asnTypesDict, idToTypeDict):
    '''Based on the uniqueness of the 'Id' elements used in
    'xlink:href' remote references, we resolve the lookups of
    remote types that we stored in AsnMetaMembers during MapSMP2Type().'''
    node = asnTypesDict[nodeTypename]
    if isinstance(node, (AsnChoice, AsnSequence, AsnSet)):
        # Resolve any AsnMetaMember placeholders among the fields.
        for idx, child in enumerate(node._members):
            if not isinstance(child[1], AsnMetaMember):
                continue
            target = child[1]._containedType
            if target in idToTypeDict:
                target = idToTypeDict[target]
            if target in asnTypesDict:
                node._members[idx] = (child[0], asnTypesDict[target])
            else:
                panic("Could not resolve Field '%s' in type '%s' (contained: %s)..." %
                      (child[0], nodeTypename, target), node.Location())  # pragma: no cover
    elif isinstance(node, (AsnSequenceOf, AsnSetOf)):
        # A string containedType is an unresolved Id/typename reference.
        if isinstance(node._containedType, str):
            target = node._containedType
            if target in idToTypeDict:
                target = idToTypeDict[target]
            if target in asnTypesDict:
                node._containedType = asnTypesDict[target]
            else:
                panic("In type '%s', could not resolve: %s)" %
                      (nodeTypename, target), node.Location())  # pragma: no cover
def ConvertCatalogueToASN_AST(inputSmp2Files):
'''Converts a list of input SMP2 Catalogues into an ASN.1 AST,
which it returns to the caller.'''
asnTypesDict = DashUnderscoreAgnosticDict()
idToTypeDict = {}
allSMP2Types = {}
# Do a first pass, verifying the primary assumption:
# That 'Id' elements of types are unique across our set of SMP2 files.
for inputSmp2File in inputSmp2Files:
a=etree.parse(open(inputSmp2File))
root=a.getroot()
if len(root)<1 or not root.tag.endswith('Catalogue'):
panic('', "You must use an XML file that contains an SMP2 Catalogue") # pragma: no cover
for t in root.xpath("//Type"):
a = Attributes(t)
if not a.Id: # Missing attribute Id, don't bother checking for duplicates
continue
if a.Id in allSMP2Types:
catalogue = allSMP2Types[a.Id] # pragma: no cover
if catalogue != inputSmp2File: # pragma: no cover
panic("The same Id exists in two files: %s exists in both: %s" %
(a.Id, str([catalogue, inputSmp2File]))) # pragma: no cover
else:
allSMP2Types[a.Id] = inputSmp2File
for inputSmp2File in inputSmp2Files:
a=etree.parse(open(inputSmp2File))
root=a.getroot()
if len(root)<1 or not root.tag.endswith('Catalogue'):
panic('', "You must use an XML file that contains an SMP2 Catalogue") # pragma: no cover
for t in root.xpath("//Type"):
# Find the enclosing Namespace element
for namespace in t.iterancestors(tag='Namespace'):
break
else:
panic("No Namespace parent node found (file:%s, line:%d)" %
t.base, t.sourceline) # pragma: no cover
# Store the namespace 'Name' attribute, and use it to prefix our types
nsName = namespace.get('Name')
if not nsName:
panic("Missing attribute Name from Namespace (file:%s, line:%d)" %
namespace.base, namespace.sourceline) # pragma: no cover
cataloguePrefix = Clean(nsName).capitalize() + "_"
a = Attributes(t)
a.base = t.base
a.sourceline = t.sourceline
if not a.type:
# Check to see if this is one of the hardcoded types
if a.href in simpleTypesTable:
k = a.href
v = simpleTypesTable[k]
nodeTypename = a.title
if nodeTypename is None:
panic("'xlink:href' points to ready-made SMP2 type, but 'xlink:title' is missing! (file:%s, line:%d)" %
a.base, a.sourceline) # pragma: no cover
nodeTypename = Clean(nodeTypename.split()[-1]).capitalize() # Primitive Int32 -> Int32
cast, low, high = v
containedDict = {
'asnFilename': a.base,
'lineno': a.sourceline
}
span = [low, high] if (low is not None and high is not None) else []
if span != []:
containedDict['range']=[low, high]
# Especially for these hardcoded types, don't prefix with namespace.Name
asnTypesDict[nodeTypename] = cast(**containedDict)
else:
if a.href is not None and a.href.startswith("http://www.esa.int/"):
print "WARNING: Unknown hardcoded (%s) - should it be added in commonSMP2.py:simpleTypesTable?" % a.href
# This <Type> element had no xsi:type, and it's xlink:title was not in the hardcoded list
# Skip it.
# panic("Both 'xsi:type' and 'Name' are mandatory attributes (file:%s, line:%d)" %
# (a.base, a.sourceline)) # pragma: no cover
continue
# The type was merged in the AST or skipped over - work on the next one
continue
if a.type.startswith('Catalogue:'):
# We only wants Types, nothing more
continue
nodeTypename = a.Name
nodeTypename = nodeTypename[0].upper() + nodeTypename[1:]
nodeTypename = nodeTypename.replace('_', '-')
# Gather children node's info:
# 1. Enumeration data
enumOptions = []
if a.type == 'Types:Enumeration':
for node in t.xpath("Literal"):
enumOptions.append([x.replace('_', '-').lower() for x in map(node.get, ['Name', 'Value'])])
# 2. ItemType data (used in arrays)
itemTypes = t.xpath("ItemType")
# 3. Field data (used in structures)
fields = t.xpath("Field")
try:
description = t.xpath("Description")[0].text
except: # pragma: no cover
location = 'from %s, in line %s' % \
(t.base, t.sourceline) # pragma: no cover
panic("Missing Description child element", location) # pragma: no cover
info(2, "Creating type:", cataloguePrefix+nodeTypename)
asnNode = MapSMP2Type(a, enumOptions, itemTypes, fields)
if 'artificial' in description:
asnNode._isArtificial = True
asnTypesDict[cataloguePrefix + nodeTypename] = asnNode
# Store mapping from Id to typename in idToTypeDict
# (used below, in FixupOutOfOrderIdReferences)
idToTypeDict[a.Id] = cataloguePrefix + nodeTypename
for nodeTypename in asnTypesDict.keys():
FixupOutOfOrderIdReferences(nodeTypename, asnTypesDict, idToTypeDict)
return asnTypesDict, idToTypeDict
../commonPy/createInternalTypes.py
\ No newline at end of file
#!/usr/bin/env python
# (C) Semantix Information Technologies.
#
# Semantix Information Technologies is licensing the code of the
# Data Modelling Tools (DMT) in the following dual-license mode:
#
# Commercial Developer License:
# The DMT Commercial Developer License is the suggested version
# to use for the development of proprietary and/or commercial software.
# This version is for developers/companies who do not want to comply
# with the terms of the GNU Lesser General Public License version 2.1.
#
# GNU LGPL v. 2.1:
# This version of DMT is the one to use for the development of
# applications, when you are willing to comply with the terms of the
# GNU Lesser General Public License version 2.1.
#
# Note that in both cases, there are no charges (royalties) for the
# generated code.
#
'''
SMP2 Catalogues datatypes importer
This tool parses SMP2 Catalogues, extracts the data types described in them,
and maps them to the corresponding ASN.1 data type declarations. It also
includes logic to merge these types to pre-existing data type declarations
of an ASN.1 file - thus allowing merging of SMP2 designs into TASTE designs
(datatype-wise).
'''
import os
import sys
import getopt
import commonPy.asnParser
from createInternalTypes import ScanChildren
from commonPy.asnAST import AsnMetaType
from commonSMP2 import \
info, panic, green, white, red, setVerbosity, \
DashUnderscoreAgnosticDict, ConvertCatalogueToASN_AST
def usage(coloredMsg=""):
    '''Prints help message and aborts. '''
    usageMsg = (
        'Usage: smp2asn <options> <smp2Catalogues...>\n\n'
        'Where options must include:\n'
        ' -o, --outAsn1=newAsnGrammar.asn the output ASN.1 grammar, containing\n'
        ' all the SMP2 types (if -a was used,\n'
        ' the existing ASN.1 types, too)\n'
        'Options may also include:\n'
        ' -a, --asn1=asnGrammar.asn an input ASN.1 grammar to merge with\n'
        ' -p, --prune prune unnamed (inner) SMP2-translation types\n'
        ' -v, --verbose Be more verbose (debugging)\n'
        ' -h, --help Show this help message\n'
    )
    panic(usageMsg, coloredMsg)
def MergeASN1_AST(smp2AsnAST):
    '''Merges the ASN.1 AST generated from SMP2 files (smp2AsnAST param)
    into the ASN.1 AST stored in commonPy.asnParser.g_names. Uses smart
    merging, i.e. diff-like semantics.

    Returns a dict mapping each SMP2 typename to the name of the
    semantically-equivalent pre-existing ASN.1 type (if one was found).
    '''
    typesToAddVerbatim = []  # SMP2 typenames with no equivalent in g_names
    identicals = {}  # SMP2 typename -> equivalent existing typename
    d = commonPy.asnParser.g_names
    for k, v in smp2AsnAST.iteritems():
        if k in d:
            # Type name exists in both trees - is it the same?
            if not v.IdenticalPerSMP2(d[k], smp2AsnAST, d):  # pragma: no cover
                panic(green + k + white + " exists, but is different:\n" +
                      "it is...\n" + d[k].AsASN1(d) + "\n" +
                      "but in SMP2 it is...\n" + v.AsASN1(smp2AsnAST))  # pragma: no cover
            else:  # pragma: no cover
                info(1, green, k, white, "exists and is semantically equivalent.")  # pragma: no cover
        else:
            # Find an identical type if possible
            for k2, v2 in d.iteritems():
                if v2._isArtificial:
                    # Avoid mapping to artificially generated inner types
                    # (see last part of VerifyAndFixAST in commonPy.asnParser)
                    continue
                if v2.IdenticalPerSMP2(v, d, smp2AsnAST):
                    info(1, green, k, white, "is identical to", red, k2, white)
                    identicals[k] = k2
                    break
            else:
                # No equivalent anywhere - schedule a verbatim copy.
                info(1, green, k, white, "must be copied (no equivalent type found)...")
                typesToAddVerbatim.append(k)
    # Merge missing types in commonPy.asnParser.g_names
    for nodeTypename in typesToAddVerbatim:
        results = []
        node = smp2AsnAST[nodeTypename]
        # Take care to add dependencies first
        ScanChildren(nodeTypename, node, smp2AsnAST, results, isRoot=True, createInnerNodesInNames=False)
        info(1, "Will copy", nodeTypename, "("+str(node.__class__)+")", ("and "+str(results) if results else ''))
        results.append(nodeTypename)
        for r in results:
            node = smp2AsnAST[r]
            d[r] = node
            if isinstance(node, AsnMetaType):
                # NOTE(review): 'r' is a typename string here, so
                # r._containedType looks wrong (node._containedType was
                # probably intended); branch is marked no-cover - confirm.
                commonPy.asnParser.g_metatypes[r] = r._containedType  # pragma: no cover
                d[r] = smp2AsnAST[r._containedType]  # pragma: no cover
            commonPy.asnParser.g_typesOfFile.setdefault(node._asnFilename, []).append(r)
    return identicals
def SaveASN_AST(bPruneUnnamedInnerTASTEtypes, outputAsn1Grammar, identicals):
d = DashUnderscoreAgnosticDict()
for k, v in commonPy.asnParser.g_names.iteritems():
d[k] = v
with open(outputAsn1Grammar, 'w') as f:
f.write('DATAVIEW DEFINITIONS AUTOMATIC TAGS ::= BEGIN\n\n')
for k, v in d.iteritems():
if v._isArtificial:
# Don't emit artificially generated inner types
# (see last part of VerifyAndFixAST in commonPy.asnParser)
continue
if bPruneUnnamedInnerTASTEtypes and 'TaStE' in k:
# Don't emit artificially generated SMP2 types