CMSIS-DSP: Testing framework and database

Added an example SQL script to compute ratios from the database.
Modified the build so that currentConfig.csv is generated per build folder.
Modified the scripts to use the new location of currentConfig.csv.
pull/19/head
Christophe Favergeon 6 years ago
parent 5d8d010b6c
commit 759a21682b

@@ -211,7 +211,7 @@ configApp(Testing ${ROOT})
target_link_libraries(Testing PRIVATE TestingLib)
target_link_libraries(Testing PRIVATE FrameworkLib)
writeConfig("currentConfig.csv")
writeConfig(${CMAKE_CURRENT_BINARY_DIR}/currentConfig.csv)
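With ${CMAKE_CURRENT_BINARY_DIR}, each build tree now gets its own copy of currentConfig.csv instead of one file in the working directory. A minimal sketch of checking that, with hypothetical build folder names that are not part of the patch:

import os

# Each configured build folder is expected to carry its own currentConfig.csv;
# the folder names below are placeholders.
for build in ("build_m0plus", "build_m4", "build_m7"):
    cfg = os.path.join(build, "currentConfig.csv")
    print(cfg, "->", "found" if os.path.isfile(cfg) else "missing")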

@@ -20,7 +20,8 @@ import re
# For table creation
MKSTRFIELD=['NAME','Regression']
MKBOOLFIELD=['HARDFP', 'FASTMATH', 'NEON', 'UNROLL', 'ROUNDING','OPTIMIZED']
MKINTFIELD=['ID', 'MAX','MAXREGCOEF']
MKINTFIELD=['ID','MAX']
MKREALFIELD=['MAXREGCOEF']
MKDATEFIELD=['DATE']
MKKEYFIELD=['CATEGORY', 'PLATFORM', 'CORE', 'COMPILER','TYPE']
MKKEYFIELDID={'CATEGORY':'categoryid',
@@ -32,7 +33,8 @@ MKKEYFIELDID={'CATEGORY':'categoryid',
# For table value extraction
VALSTRFIELD=['NAME','VERSION','Regression']
VALBOOLFIELD=['HARDFP', 'FASTMATH', 'NEON', 'UNROLL', 'ROUNDING','OPTIMIZED']
VALINTFIELD=['ID', 'MAX','MAXREGCOEF']
VALINTFIELD=['ID', 'MAX']
VALREALFIELD=['MAXREGCOEF']
VALDATEFIELD=['DATE']
VALKEYFIELD=['CATEGORY', 'PLATFORM', 'CORE', 'COMPILER','TYPE']
@@ -63,6 +65,8 @@ def getColumns(elem,full):
colsToKeep.append(field)
if field in MKINTFIELD:
colsToKeep.append(field)
if field in MKREALFIELD:
colsToKeep.append(field)
if field in MKKEYFIELD:
colsToKeep.append(field)
if field in MKDATEFIELD:
@@ -90,6 +94,8 @@ def createTableIfMissing(conn,elem,tableName,full):
sql += "%s\n %s TEXT" % (start,field)
if field in MKINTFIELD:
sql += "%s\n %s INTEGER" % (start,field)
if field in MKREALFIELD:
sql += "%s\n %s REAL" % (start,field)
if field in MKKEYFIELD:
sql += "%s\n %s INTEGER" % (start,MKKEYFIELDID[field])
if field in MKDATEFIELD:
@@ -201,6 +207,8 @@ def addRows(conn,elem,tableName,full):
if field in VALINTFIELD:
keys[field]=row[field]
if field in VALREALFIELD:
keys[field]=row[field]
if field in VALDATEFIELD:
keys[field]=row[field]
if field in VALBOOLFIELD:
@@ -237,7 +245,10 @@ def addRows(conn,elem,tableName,full):
if field in MKSTRFIELD or field in MKDATEFIELD:
sql += " %s\n \"%s\"" % (start,keys[field])
elif field in keep:
sql += " %s\n %d" % (start,keys[field])
if field in VALREALFIELD:
sql += " %s\n %f" % (start,keys[field])
else:
sql += " %s\n %d" % (start,keys[field])
start = ","
sql += " )"

@@ -0,0 +1,48 @@
.headers ON
/*
Select the core to be used as the reference. Only the last day of measurements is used.
*/
CREATE TEMP VIEW if not exists refCore AS select *
from Unary
where coreid=5 AND DATE BETWEEN datetime('now','localtime','-23 hours') AND datetime('now', 'localtime');
;
/*
Select the cores to be benchmarked against the reference. Only the last day of measurements is used.
*/
CREATE TEMP VIEW if not exists otherCores AS select *
from Unary
where coreid != 5 AND DATE BETWEEN datetime('now','localtime','-23 hours') AND datetime('now', 'localtime');
;
/*
Using the regression database, compute the ratio of max cycles
and the ratio of the highest-degree regression coefficients.
Rename the columns in the result.
*/
select temp.otherCores.ID as ID,
CATEGORY.category as CATEGORY,
temp.otherCores.NAME as NAME,
PLATFORM.platform as PLATFORM,
CORE.core as CORE,
COMPILERKIND.compiler as COMPILER,
COMPILER.version as COMPILERVERSION,
TYPE.type as TYPE,
temp.otherCores.DATE as DATE,
(1.0*temp.refCore.MAX / temp.otherCores.MAX) as MAXRATIO,
(1.0*temp.refCore.MAXREGCOEF / temp.otherCores.MAXREGCOEF) as REGRESSIONRATIO
from temp.otherCores
INNER JOIN temp.refCore USING(ID,categoryid,NAME)
INNER JOIN CATEGORY USING(categoryid)
INNER JOIN PLATFORM USING(platformid)
INNER JOIN CORE USING(coreid)
INNER JOIN COMPILER USING(compilerid)
INNER JOIN COMPILERKIND USING(compilerkindid)
INNER JOIN TYPE USING(typeid)
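The script relies on sqlite3 shell dot-commands (.headers ON), so it has to be fed to the sqlite3 command line rather than Python's sqlite3 module. A possible way to run it and capture the ratios as CSV, where the database and file names are assumptions:

import subprocess

# "bench.db" and "ratios.sql" are placeholder names; -csv plus .headers ON make
# the sqlite3 shell emit a CSV table with a header row.
with open("ratios.sql") as sql, open("ratios.csv", "w") as out:
    subprocess.run(["sqlite3", "-csv", "bench.db"], stdin=sql, stdout=out, check=True)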

@@ -234,7 +234,7 @@ def getCyclesFromTrace(trace):
else:
return(TestScripts.ParseTrace.getCycles(trace))
def analyseResult(root,results,embedded,benchmark,trace,formatter):
def analyseResult(resultPath,root,results,embedded,benchmark,trace,formatter):
calibration = 0
if trace:
# First cycle in the trace is the calibration data
@@ -305,7 +305,7 @@ def analyseResult(root,results,embedded,benchmark,trace,formatter):
#benchFile.write("ID,%s,PASSED,ERROR,CYCLES\n" % header)
csvheaders = ""
with open('currentConfig.csv', 'r') as f:
with open(os.path.join(resultPath,'currentConfig.csv'), 'r') as f:
reader = csv.reader(f)
csvheaders = next(reader, None)
configList = list(reader)
@@ -406,12 +406,15 @@ def analyseResult(root,results,embedded,benchmark,trace,formatter):
def analyze(root,results,args,trace):
# currentConfig.csv should be in the same place
resultPath=os.path.dirname(args.r)
if args.c:
analyseResult(root,results,args.e,args.b,trace,CSVFormatter())
analyseResult(resultPath,root,results,args.e,args.b,trace,CSVFormatter())
elif args.m:
analyseResult(root,results,args.e,args.b,trace,MathematicaFormatter())
analyseResult(resultPath,root,results,args.e,args.b,trace,MathematicaFormatter())
else:
analyseResult(root,results,args.e,args.b,trace,TextFormatter())
analyseResult(resultPath,root,results,args.e,args.b,trace,TextFormatter())
parser = argparse.ArgumentParser(description='Parse test description')
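The scripts now derive the location of currentConfig.csv from the result file passed with -r rather than from the current working directory. A condensed sketch of that lookup; the helper name and example path are illustrative, not from the patch:

import csv, os

def readCurrentConfig(resultFile):
    # currentConfig.csv sits next to the result file, i.e. in the build folder.
    resultPath = os.path.dirname(resultFile)
    with open(os.path.join(resultPath, "currentConfig.csv"), "r") as f:
        reader = csv.reader(f)
        headers = next(reader, None)
        configList = list(reader)
    return headers, configList

# e.g. headers, configs = readCurrentConfig("build_m7/results.txt")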

@@ -55,14 +55,14 @@ def formatProd(a,b):
return(str(b))
return("%s * %s" % (a,b))
def summaryBenchmark(elem,path):
def summaryBenchmark(resultPath,elem,path):
regressionPath=os.path.join(os.path.dirname(path),"regression.csv")
print(" Generating %s" % regressionPath)
full=pd.read_csv(path,dtype={'OLDID': str} ,keep_default_na = False)
#print(full)
csvheaders = []
with open('currentConfig.csv', 'r') as f:
with open(os.path.join(resultPath,'currentConfig.csv'), 'r') as f:
reader = csv.reader(f)
csvheaders = next(reader, None)
@@ -91,15 +91,15 @@ def summaryBenchmark(elem,path):
regression.to_csv(regressionPath,index=False,quoting=csv.QUOTE_NONNUMERIC)
def extractBenchmarks(benchmark,elem):
def extractBenchmarks(resultPath,benchmark,elem):
if not elem.data["deprecated"]:
if elem.params:
benchPath = os.path.join(benchmark,elem.fullPath(),"fullBenchmark.csv")
print("Processing %s" % benchPath)
summaryBenchmark(elem,benchPath)
summaryBenchmark(resultPath,elem,benchPath)
for c in elem.children:
extractBenchmarks(benchmark,c)
extractBenchmarks(resultPath,benchmark,c)
@@ -107,7 +107,8 @@ parser = argparse.ArgumentParser(description='Generate summary benchmarks')
parser.add_argument('-f', nargs='?',type = str, default=None, help="Test description file path")
parser.add_argument('-b', nargs='?',type = str, default="FullBenchmark", help="Full Benchmark dir path")
#parser.add_argument('-e', action='store_true', help="Embedded test")
# Needed to find the currentConfig.csv and know the headers
parser.add_argument('-r', nargs='?',type = str, default=None, help="Result file path")
parser.add_argument('others', nargs=argparse.REMAINDER)
@@ -118,7 +119,8 @@ if args.f is not None:
# Parse the test description file
root = p.parse(args.f)
d.deprecate(root,args.others)
extractBenchmarks(args.b,root)
resultPath=os.path.dirname(args.r)
extractBenchmarks(resultPath,args.b,root)
else:
parser.print_help()
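With the new -r option the summary script can locate currentConfig.csv next to the result file. A hypothetical invocation; the script and file names are assumptions, while -f, -b and -r mirror the argparse options above:

import subprocess

subprocess.run(
    ["python", "summaryBench.py",       # placeholder script name
     "-f", "desc.txt",                  # test description file
     "-b", "FullBenchmark",             # full benchmark directory (default)
     "-r", "build_m7/results.txt"],     # result file; its folder holds currentConfig.csv
    check=True)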