CMSIS-DSP: Added M55 to test framework.

Added a regression script to test several configurations of
the CMSIS-DSP.
pull/19/head
Christophe Favergeon 6 years ago
parent 64d43685fb
commit 23f969c6c7

@@ -1,4 +1,4 @@
-#! armclang -E --target=arm-arm-none-eabi -mcpu=cortex-a5 -xc
+#! armcc -E
;**************************************************
; Copyright (c) 2017 ARM Ltd. All rights reserved.
;**************************************************

@@ -58,6 +58,8 @@ void FIQ_Handler (void) __attribute__ ((weak, alias("Default_Handler")));
  Exception / Interrupt Vector Table
 *----------------------------------------------------------------------------*/
void Vectors(void) {
+  volatile int i;
+#if 0
  __ASM volatile(
  "LDR __current_pc, =Reset_Handler \n"
  "LDR __current_pc, =Undef_Handler \n"
@@ -68,12 +70,14 @@ void Vectors(void) {
  "LDR __current_pc, =IRQ_Handler \n"
  "LDR __current_pc, =FIQ_Handler \n"
  );
+#endif
}
/*----------------------------------------------------------------------------
  Reset Handler called on controller reset
 *----------------------------------------------------------------------------*/
void Reset_Handler(void) {
+#if 0
  __ASM volatile(
  // Mask interrupts
@@ -128,6 +132,7 @@ void Reset_Handler(void) {
  // Call __main
  "BL __main \n"
  );
+#endif
}
/*----------------------------------------------------------------------------

@@ -15,3 +15,5 @@ currentConfig.csv
test.txt
__pycache__
bugcheck.py
+fulltests/
+testUNIXrunConfig.yaml

@@ -300,6 +300,11 @@ If you want to compute summary statistics with regression:
pip install numpy
pip install pandas
+If you want to run the script that launches all the tests on all possible configurations,
+you'll need yaml:
+pip install pyyaml
### Generate the test patterns in Patterns folder
We have archived a lot of test patterns on GitHub, so this step is needed only if you write new test patterns.
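For reference, pyyaml is what the configuration-sweep script uses to read its run description. A minimal sketch of that usage (the file name matches the testUNIXrunConfig.yaml ignored above; the key is one the script actually reads):

import yaml

# Load the regression run configuration (CORES, TOOLCHAINS, TESTS, ...)
with open("testUNIXrunConfig.yaml", "r") as f:
    config = yaml.safe_load(f)
print(config["TOOLCHAINS"])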

@@ -1,5 +1,6 @@
#include "TransformF32.h"
#include "Error.h"
+#include "arm_math.h"
#include "arm_const_structs.h"
const arm_cfft_instance_f32 *arm_cfft_get_instance_f32(uint16_t fftLen)

@@ -0,0 +1,349 @@
import subprocess
import colorama
from colorama import init,Fore, Back, Style
import argparse
import os
import os.path
from contextlib import contextmanager
import shutil
import glob
from pathlib import Path

DEBUGMODE = False

NOTESTFAILED = 0
MAKEFAILED = 1
TESTFAILED = 2
FLOWFAILURE = 3
CALLFAILURE = 4

def joinit(iterable, delimiter):
    it = iter(iterable)
    yield next(it)
    for x in it:
        yield delimiter
        yield x

class TestFlowFailure(Exception):
    def __init__(self,completed):
        self._errorcode = completed.returncode
    def errorCode(self):
        return(self._errorcode)

class CallFailure(Exception):
    pass

def check(n):
    #print(n)
    if n is not None:
        if n.returncode != 0:
            raise TestFlowFailure(n)
    else:
        raise CallFailure()

def msg(t):
    print(Fore.CYAN + t + Style.RESET_ALL)

def errorMsg(t):
    print(Fore.RED + t + Style.RESET_ALL)

def fullTestFolder(rootFolder):
    return(os.path.join(rootFolder,"CMSIS","DSP","Testing","fulltests"))

class BuildConfig:
    def __init__(self,toUnset,rootFolder,buildFolder,compiler,toolchain,core,cmake):
        self._toUnset = toUnset
        self._buildFolder = buildFolder
        self._rootFolder = os.path.abspath(rootFolder)
        self._dspFolder = os.path.join(self._rootFolder,"CMSIS","DSP")
        self._testingFolder = os.path.join(self._dspFolder,"Testing")
        self._fullTests = os.path.join(self._testingFolder,"fulltests")
        self._compiler = compiler
        self._toolchain = toolchain
        self._core = core
        self._cmake = cmake
        self._savedEnv = {}
    def compiler(self):
        return(self._compiler)
    def toolChainFile(self):
        return(self._toolchain)
    def core(self):
        return(self._core)
    def path(self):
        return(os.path.join(self._fullTests,self._buildFolder))
    def archivePath(self):
        return(os.path.join(self._fullTests,"archive",self._buildFolder))
    def archiveResultPath(self):
        return(os.path.join(self._fullTests,"archive",self._buildFolder,"results"))
    def archiveLogPath(self):
        return(os.path.join(self._fullTests,"archive",self._buildFolder,"logs"))
    def archiveErrorPath(self):
        return(os.path.join(self._fullTests,"archive",self._buildFolder,"errors"))
    def toolChainPath(self):
        return(self._dspFolder)
    def cmakeFilePath(self):
        return(self._testingFolder)
    def buildFolderName(self):
        return(self._buildFolder)
    def saveEnv(self):
        if self._toUnset is not None:
            for v in self._toUnset:
                self._savedEnv[v] = os.environ[v]
                del os.environ[v]
    def restoreEnv(self):
        if self._toUnset is not None:
            for v in self._toUnset:
                os.environ[v] = self._savedEnv[v]
            self._savedEnv = {}
    # Build in the build folder.
    # We need to be able to detect a failed build.
    def build(self,test):
        completed=None
        # Save and unset some environment variables
        self.saveEnv()
        with self.buildFolder() as b:
            msg(" Build %s\n" % self.buildFolderName())
            with open(os.path.join(self.archiveLogPath(),"makelog_%s.txt" % test),"w") as makelog:
                with open(os.path.join(self.archiveErrorPath(),"makeerror_%s.txt" % test),"w") as makeerr:
                    if DEBUGMODE:
                        completed=subprocess.run(["make","-j8","VERBOSE=1"],timeout=3600)
                    else:
                        completed=subprocess.run(["make","-j8","VERBOSE=1"],stdout=makelog,stderr=makeerr,timeout=3600)
        # Restore environment variables
        self.restoreEnv()
        check(completed)
    def getTest(self,test):
        return(Test(self,test))
    # Launch the cmake command.
    def createCMake(self,flags):
        with self.buildFolder() as b:
            self.saveEnv()
            msg("Create cmake for %s\n" % self.buildFolderName())
            toolchainCmake = os.path.join(self.toolChainPath(),self.toolChainFile())
            cmd = [self._cmake]
            cmd += ["-DCMAKE_PREFIX_PATH=%s" % self.compiler(),
                    "-DCMAKE_TOOLCHAIN_FILE=%s" % toolchainCmake,
                    "-DARM_CPU=%s" % self.core(),
                    "-DPLATFORM=FVP"
                   ]
            cmd += flags
            cmd += ["-DBENCHMARK=OFF",
                    "-DFULLYCONNECTED=OFF",
                    "-DCONVOLUTION=OFF",
                    "-DACTIVATION=OFF",
                    "-DPOOLING=OFF",
                    "-DSOFTMAX=OFF",
                    "-DNNSUPPORT=OFF",
                    "-DBASICMATHSNN=OFF",
                    "-DRESHAPE=OFF",
                    "-DCONCATENATION=OFF",
                    "-DWRAPPER=OFF",
                    "-DCONFIGTABLE=OFF",
                    "-DROOT=%s" % self._rootFolder,
                    "-DCMAKE_BUILD_TYPE=Release",
                    "-G", "Unix Makefiles" ,"%s" % self.cmakeFilePath()]
            with open(os.path.join(self.archiveLogPath(),"cmakecmd.txt"),"w") as cmakecmd:
                cmakecmd.write("".join(joinit(cmd," ")))
            with open(os.path.join(self.archiveLogPath(),"cmakelog.txt"),"w") as cmakelog:
                with open(os.path.join(self.archiveErrorPath(),"cmakeerror.txt"),"w") as cmakeerr:
                    completed=subprocess.run(cmd, stdout=cmakelog,stderr=cmakeerr, timeout=3600)
        self.restoreEnv()
        check(completed)
    # Create the build folder if missing
    def createFolder(self):
        os.makedirs(self.path(),exist_ok=True)
    def createArchive(self, flags):
        os.makedirs(self.archivePath(),exist_ok=True)
        os.makedirs(self.archiveResultPath(),exist_ok=True)
        os.makedirs(self.archiveErrorPath(),exist_ok=True)
        os.makedirs(self.archiveLogPath(),exist_ok=True)
        with open(os.path.join(self.archivePath(),"flags.txt"),"w") as f:
            for flag in flags:
                f.write(flag)
                f.write("\n")
    # Delete the build folder
    def cleanFolder(self):
        print("Delete %s\n" % self.path())
        #DEBUG
        if not DEBUGMODE:
            shutil.rmtree(self.path())
    # Archive results and currentConfig.csv to another folder
    def archiveResults(self):
        results=glob.glob(os.path.join(self.path(),"results_*"))
        for result in results:
            dst=os.path.join(self.archiveResultPath(),os.path.basename(result))
            shutil.copy(result,dst)
        src = os.path.join(self.path(),"currentConfig.csv")
        dst = os.path.join(self.archiveResultPath(),os.path.basename(src))
        shutil.copy(src,dst)
    @contextmanager
    def buildFolder(self):
        current=os.getcwd()
        try:
            os.chdir(self.path())
            yield self.path()
        finally:
            os.chdir(current)
    @contextmanager
    def archiveFolder(self):
        current=os.getcwd()
        try:
            os.chdir(self.archivePath())
            yield self.archivePath()
        finally:
            os.chdir(current)
    @contextmanager
    def resultFolder(self):
        current=os.getcwd()
        try:
            os.chdir(self.archiveResultPath())
            yield self.archiveResultPath()
        finally:
            os.chdir(current)
    @contextmanager
    def logFolder(self):
        current=os.getcwd()
        try:
            os.chdir(self.archiveLogPath())
            yield self.archiveLogPath()
        finally:
            os.chdir(current)
    @contextmanager
    def errorFolder(self):
        current=os.getcwd()
        try:
            os.chdir(self.archiveErrorPath())
            yield self.archiveErrorPath()
        finally:
            os.chdir(current)

class Test:
    def __init__(self,build,test):
        self._test = test
        self._buildConfig = build
    def buildConfig(self):
        return(self._buildConfig)
    def testName(self):
        return(self._test)
    # Process a test from the test description file
    def processTest(self):
        completed=subprocess.run(["python","processTests.py","-e",self.testName()],timeout=3600)
        check(completed)
    def getResultPath(self):
        return(os.path.join(self.buildConfig().path(),self.resultName()))
    def resultName(self):
        return("results_%s.txt" % self.testName())
    # Run a specific test in the current folder.
    # A specific results.txt file is created in
    # the build folder for this test.
    #
    # We need a timeout and to detect a failed run.
    def run(self,fvp):
        completed = None
        with self.buildConfig().buildFolder() as b:
            msg(" Run %s\n" % self.testName())
            with open(self.resultName(),"w") as results:
                completed=subprocess.run(fvp.split(),stdout=results,timeout=3600)
        check(completed)
    # Process results of the given tests
    # in the given build folder.
    # We need to detect failed tests.
    def processResult(self):
        msg(" Parse result for %s\n" % self.testName())
        with open(os.path.join(self.buildConfig().archiveResultPath(),"processedResult_%s.txt" % self.testName()),"w") as presult:
            completed=subprocess.run(["python","processResult.py","-e","-r",self.getResultPath()],stdout=presult,timeout=3600)
            # When a test fails, the regression continues but we
            # track that a test has failed
            if completed.returncode==0:
                return(NOTESTFAILED)
            else:
                return(TESTFAILED)
    def runAndProcess(self,compiler,fvp):
        # If we can't parse the test description we fail all tests
        self.processTest()
        # Otherwise, if only the build or some tests are failing, we continue
        # with the other tests
        try:
            self.buildConfig().build(self.testName())
        except:
            return(MAKEFAILED)
        # We run tests only for AC6.
        # For other compilers, only the build is tested.
        # Since a full build is no longer possible because of the huge patterns,
        # the build is done per test suite.
        if compiler == "AC6":
            if fvp is not None:
                self.run(fvp)
                return(self.processResult())
            else:
                msg("No FVP available")
                return(NOTESTFAILED)
        else:
            return(NOTESTFAILED)

# Preprocess the test description
def preprocess():
    msg("Process test description file\n")
    completed = subprocess.run(["python", "preprocess.py","-f","desc.txt"],timeout=3600)
    check(completed)

# Generate all missing C code by using all classes in the
# test description file
def generateAllCCode():
    msg("Generate all missing C files\n")
    completed = subprocess.run(["python","processTests.py", "-e"],timeout=3600)
    check(completed)
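Taken together, a driver composes these pieces: create a BuildConfig, generate the cmake tree, then build, run, and archive each test suite. A minimal sketch of that flow, with illustrative paths, flags, and names (the real driver below reads all of them from a YAML config):

# All values here are hypothetical; the runall driver takes them from its YAML config.
cfg = BuildConfig(None, "../../..", "build_AC6_M55_1",
                  "/opt/ac6/bin", "armac6.cmake", "cortex-m55", "cmake")
cfg.createFolder()
cfg.createArchive(["-DLOOPUNROLL=ON"])   # record the flag set for this build
cfg.createCMake(["-DLOOPUNROLL=ON"])     # configure the build tree
test = cfg.getTest("BasicTests")
status = test.runAndProcess("AC6", "FVP_Corstone-300 -a Testing")
cfg.archiveResults()
cfg.cleanFolder()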

@@ -1,26 +1,26 @@
echo "Basic Maths"
-python addToDB.py -f bench.txt BasicBenchmarks
+python addToDB.py BasicBenchmarks
echo "Complex Maths"
-python addToDB.py -f bench.txt ComplexBenchmarks
+python addToDB.py ComplexBenchmarks
echo "FIR"
-python addToDB.py -f bench.txt FIR
+python addToDB.py FIR
echo "Convolution / Correlation"
-python addToDB.py -f bench.txt MISC
+python addToDB.py MISC
echo "Decimation / Interpolation"
-python addToDB.py -f bench.txt DECIM
+python addToDB.py DECIM
echo "BiQuad"
-python addToDB.py -f bench.txt BIQUAD
+python addToDB.py BIQUAD
echo "Controller"
-python addToDB.py -f bench.txt Controller
+python addToDB.py Controller
echo "Fast Math"
-python addToDB.py -f bench.txt FastMath
+python addToDB.py FastMath
echo "Barycenter"
-python addToDB.py -f bench.txt SupportBarF32
+python addToDB.py SupportBarF32
echo "Support"
-python addToDB.py -f bench.txt Support
+python addToDB.py Support
echo "Unary Matrix"
-python addToDB.py -f bench.txt Unary
+python addToDB.py Unary
echo "Binary Matrix"
-python addToDB.py -f bench.txt Binary
+python addToDB.py Binary
echo "Transform"
-python addToDB.py -f bench.txt Transform
+python addToDB.py Transform

@@ -1,26 +1,26 @@
echo "Basic Maths"
-python addToRegDB.py -f bench.txt BasicBenchmarks
+python addToRegDB.py BasicBenchmarks
echo "Complex Maths"
-python addToRegDB.py -f bench.txt ComplexBenchmarks
+python addToRegDB.py ComplexBenchmarks
echo "FIR"
-python addToRegDB.py -f bench.txt FIR
+python addToRegDB.py FIR
echo "Convolution / Correlation"
-python addToRegDB.py -f bench.txt MISC
+python addToRegDB.py MISC
echo "Decimation / Interpolation"
-python addToRegDB.py -f bench.txt DECIM
+python addToRegDB.py DECIM
echo "BiQuad"
-python addToRegDB.py -f bench.txt BIQUAD
+python addToRegDB.py BIQUAD
echo "Controller"
-python addToRegDB.py -f bench.txt Controller
+python addToRegDB.py Controller
echo "Fast Math"
-python addToRegDB.py -f bench.txt FastMath
+python addToRegDB.py FastMath
echo "Barycenter"
-python addToRegDB.py -f bench.txt SupportBarF32
+python addToRegDB.py SupportBarF32
echo "Support"
-python addToRegDB.py -f bench.txt Support
+python addToRegDB.py Support
echo "Unary Matrix"
-python addToRegDB.py -f bench.txt Unary
+python addToRegDB.py Unary
echo "Binary Matrix"
-python addToRegDB.py -f bench.txt Binary
+python addToRegDB.py Binary
echo "Transform"
-python addToRegDB.py -f bench.txt Transform
+python addToRegDB.py Transform

@@ -267,7 +267,7 @@ def addOneBenchmark(elem,fullPath,db,group):
    else:
        tableName = elem.data["class"]
    conn = sqlite3.connect(db)
-   #createTableIfMissing(conn,elem,tableName,full)
+   createTableIfMissing(conn,elem,tableName,full)
    config = addRows(conn,elem,tableName,full)
    addConfig(conn,config,fullDate)
    conn.close()
@@ -287,7 +287,7 @@ def addToDB(benchmark,dbpath,elem,group):
parser = argparse.ArgumentParser(description='Generate summary benchmarks')
-parser.add_argument('-f', nargs='?',type = str, default="Output.pickle", help="Test description file path")
+parser.add_argument('-f', nargs='?',type = str, default="Output.pickle", help="File path")
parser.add_argument('-b', nargs='?',type = str, default="FullBenchmark", help="Full Benchmark dir path")
#parser.add_argument('-e', action='store_true', help="Embedded test")
parser.add_argument('-o', nargs='?',type = str, default="bench.db", help="Benchmark database")

@@ -298,7 +298,7 @@ def addToDB(benchmark,dbpath,elem,group):
parser = argparse.ArgumentParser(description='Generate summary benchmarks')
-parser.add_argument('-f', nargs='?',type = str, default="Output.pickle", help="Test description file path")
+parser.add_argument('-f', nargs='?',type = str, default="Output.pickle", help="File path")
parser.add_argument('-b', nargs='?',type = str, default="FullBenchmark", help="Full Benchmark dir path")
#parser.add_argument('-e', action='store_true', help="Embedded test")
parser.add_argument('-o', nargs='?',type = str, default="reg.db", help="Regression benchmark database")

@@ -11,6 +11,9 @@ import csv
import TestScripts.ParseTrace
import colorama
from colorama import init,Fore, Back, Style
+import sys
+
+resultStatus=0
init()
@@ -336,6 +339,7 @@ def getCyclesFromTrace(trace):
    return(TestScripts.ParseTrace.getCycles(trace))
def analyseResult(resultPath,root,results,embedded,benchmark,trace,formatter):
+   global resultStatus
    calibration = 0
    if trace:
        # First cycle in the trace is the calibration data
@@ -510,6 +514,8 @@ def analyseResult(resultPath,root,results,embedded,benchmark,trace,formatter):
            params=""
            writeBenchmark(elem,benchFile,theId,theError,passed,cycles,params,config)
        # Format the node
+       if not passed:
+           resultStatus=1
        formatter.printTest(elem,theId,theError,errorDetail,theLine,passed,cycles,params)
@@ -552,6 +558,8 @@ parser.add_argument('-t', nargs='?',type = str, default=None, help="External tra
args = parser.parse_args()
if args.f is not None:
    #p = parse.Parser()
    # Parse the test description file
@@ -569,5 +577,7 @@ if args.f is not None:
    with open(args.r,"r") as results:
        extractDataFiles(results,args.o)
+   sys.exit(resultStatus)
else:
    parser.print_help()
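Since processResult.py now exits with resultStatus, a caller can detect test failures straight from the process return code. A minimal sketch, assuming a results file produced by an earlier FVP run (the path here is illustrative):

import subprocess

# Return code is nonzero as soon as one test in the results file failed
completed = subprocess.run(["python", "processResult.py", "-e", "-r", "build_m55/results.txt"])
if completed.returncode != 0:
    print("At least one test failed")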

@@ -1,55 +1,55 @@
@ECHO OFF
echo "Basic Maths"
-python processTests.py -f bench.txt -e BasicBenchmarks
+python processTests.py -e BasicBenchmarks
call:runBench
echo "Complex Maths"
-python processTests.py -f bench.txt -e ComplexBenchmarks
+python processTests.py -e ComplexBenchmarks
call:runBench
echo "FIR"
-python processTests.py -f bench.txt -e FIR
+python processTests.py -e FIR
call:runBench
echo "Convolution / Correlation"
-python processTests.py -f bench.txt -e MISC
+python processTests.py -e MISC
call:runBench
echo "Decimation / Interpolation"
-python processTests.py -f bench.txt -e DECIM
+python processTests.py -e DECIM
call:runBench
echo "BiQuad"
-python processTests.py -f bench.txt -e BIQUAD
+python processTests.py -e BIQUAD
call:runBench
echo "Controller"
-python processTests.py -f bench.txt -e Controller
+python processTests.py -e Controller
call:runBench
echo "Fast Math"
-python processTests.py -f bench.txt -e FastMath
+python processTests.py -e FastMath
call:runBench
echo "Barycenter"
-python processTests.py -f bench.txt -e SupportBarF32
+python processTests.py -e SupportBarF32
call:runBench
echo "Support"
-python processTests.py -f bench.txt -e Support
+python processTests.py -e Support
call:runBench
echo "Unary Matrix"
-python processTests.py -f bench.txt -e Unary
+python processTests.py -e Unary
call:runBench
echo "Binary Matrix"
-python processTests.py -f bench.txt -e Binary
+python processTests.py -e Binary
call:runBench
echo "Transform"
-python processTests.py -f bench.txt -e Transform
+python processTests.py -e Transform
call:runBench
EXIT /B
@@ -64,7 +64,7 @@ REM "C:\Program Files\ARM\Development Studio 2019.0\sw\models\bin\FVP_MPS2_Corte
"C:\Program Files\ARM\Development Studio 2019.0\sw\models\bin\FVP_VE_Cortex-A5x1.exe" -a Testing > result.txt
popd
echo "Parse result"
-REM python processResult.py -f bench.txt -e -r build_m7\result.txt
+REM python processResult.py -e -r build_m7\result.txt
-REM python processResult.py -f bench.txt -e -r build_m0\result.txt
+REM python processResult.py -e -r build_m0\result.txt
-python processResult.py -f bench.txt -e -r build_a5\result.txt
+python processResult.py -e -r build_a5\result.txt
goto:eof

@@ -0,0 +1,105 @@
import os
import os.path
import subprocess
import colorama
from colorama import init,Fore, Back, Style
import argparse

GROUPS = [
    "BasicBenchmarks",
    "ComplexBenchmarks",
    "FIR",
    "MISC",
    "DECIM",
    "BIQUAD",
    "Controller",
    "FastMath",
    "SupportBarF32",
    "Support",
    "Unary",
    "Binary",
    "Transform"
]

init()

def msg(t):
    print(Fore.CYAN + t + Style.RESET_ALL)

def processTest(test):
    subprocess.call(["python","processTests.py","-e",test])

def addToDB(cmd):
    for g in GROUPS:
        msg("Add group %s" % g)
        subprocess.call(["python",cmd,g])

def run(build,fvp,custom=None):
    result = "results.txt"
    resultPath = os.path.join(build,result)
    current=os.getcwd()
    try:
        msg("Build")
        os.chdir(build)
        subprocess.call(["make"])
        msg("Run")
        with open(result,"w") as results:
            if custom:
                subprocess.call([fvp] + custom,stdout=results)
            else:
                subprocess.call([fvp,"-a","Testing"],stdout=results)
    finally:
        os.chdir(current)
    msg("Parse result")
    subprocess.call(["python","processResult.py","-e","-r",resultPath])
    msg("Regression computations")
    subprocess.call(["python","summaryBench.py","-r",resultPath])
    msg("Add results to benchmark database")
    addToDB("addToDB.py")
    msg("Add results to regression database")
    addToDB("addToRegDB.py")

def processAndRun(buildfolder,fvp,custom=None):
    processTest("DSPBenchmarks")
    run(buildfolder,fvp,custom=custom)

parser = argparse.ArgumentParser(description='Parse test description')
parser.add_argument('-f', nargs='?',type = str, default="build_benchmark_m7", help="Build folder")
parser.add_argument('-v', nargs='?',type = str, default="C:\\Program Files\\ARM\\Development Studio 2019.0\\sw\\models\\bin\\FVP_MPS2_Cortex-M7.exe", help="Fast Model")
parser.add_argument('-c', nargs='?',type = str, help="Custom args")

args = parser.parse_args()

if args.f is not None:
    BUILDFOLDER=args.f
else:
    BUILDFOLDER="build_benchmark_m7"
if args.v is not None:
    FVP=args.v
else:
    FVP="C:\\Program Files\\ARM\\Development Studio 2019.0\\sw\\models\\bin\\FVP_MPS2_Cortex-M7.exe"
if args.c:
    custom = args.c.split()
else:
    custom = None

print(Fore.RED + "bench.db and reg.db databases must exist before running this script" + Style.RESET_ALL)

msg("Process benchmark description file")
subprocess.call(["python", "preprocess.py","-f","bench.txt"])
msg("Generate all missing C files")
subprocess.call(["python","processTests.py", "-e"])

processAndRun(BUILDFOLDER,FVP,custom=custom)

@@ -4,126 +4,177 @@ import subprocess
import colorama
from colorama import init,Fore, Back, Style
import argparse
+from TestScripts.Regression.Commands import *
+import yaml
+import sys
+import itertools
+from pathlib import Path
+
+# Small state machine
+def updateTestStatus(testStatusForThisBuild,newTestStatus):
+    if testStatusForThisBuild == NOTESTFAILED:
+        if newTestStatus == NOTESTFAILED:
+            return(NOTESTFAILED)
+        if newTestStatus == MAKEFAILED:
+            return(MAKEFAILED)
+        if newTestStatus == TESTFAILED:
+            return(TESTFAILED)
+    if testStatusForThisBuild == MAKEFAILED:
+        if newTestStatus == NOTESTFAILED:
+            return(MAKEFAILED)
+        if newTestStatus == MAKEFAILED:
+            return(MAKEFAILED)
+        if newTestStatus == TESTFAILED:
+            return(TESTFAILED)
+    if testStatusForThisBuild == TESTFAILED:
+        if newTestStatus == NOTESTFAILED:
+            return(TESTFAILED)
+        if newTestStatus == MAKEFAILED:
+            return(TESTFAILED)
+        if newTestStatus == TESTFAILED:
+            return(TESTFAILED)
+    if testStatusForThisBuild == FLOWFAILURE:
+        return(testStatusForThisBuild)
+    if testStatusForThisBuild == CALLFAILURE:
+        return(testStatusForThisBuild)
+
+root = Path(os.getcwd()).parent.parent.parent
+
+def cartesian(*somelists):
+    r=[]
+    for element in itertools.product(*somelists):
+        r.append(list(element))
+    return(r)
+
+testFailed = 0
init()
-def msg(t):
-    print(Fore.CYAN + t + Style.RESET_ALL)
-def processTest(test):
-    subprocess.call(["python","processTests.py","-e",test])
-def build(build,fvp,test,custom=None):
-    result = "results_%s.txt" % test
-    resultPath = os.path.join(build,result)
-    current=os.getcwd()
-    try:
-        msg("Build %s" % test)
-        os.chdir(build)
-        subprocess.call(["make"])
-        msg("Run %s" % test)
-        with open(result,"w") as results:
-            if custom:
-                subprocess.call([fvp] + custom,stdout=results)
-            else:
-                subprocess.call([fvp,"-a","Testing"],stdout=results)
-    finally:
-        os.chdir(current)
-    msg("Parse result for %s" % test)
-    subprocess.call(["python","processResult.py","-e","-r",resultPath])
-def processAndRun(buildfolder,fvp,test,custom=None):
-    processTest(test)
-    build(buildfolder,fvp,test,custom=custom)
parser = argparse.ArgumentParser(description='Parse test description')
-parser.add_argument('-f', nargs='?',type = str, default="build_m7", help="Build folder")
-parser.add_argument('-v', nargs='?',type = str, default="C:\\Program Files\\ARM\\Development Studio 2019.0\\sw\\models\\bin\\FVP_MPS2_Cortex-M7.exe", help="Fast Model")
-parser.add_argument('-c', nargs='?',type = str, help="Custom args")
+parser.add_argument('-i', nargs='?',type = str, default="testrunConfig.yaml",help="Config file")
+parser.add_argument('-r', nargs='?',type = str, default=root, help="Root folder")
+parser.add_argument('-n', nargs='?',type = int, default=0, help="ID value when launchign in parallel")
args = parser.parse_args()
-if args.f is not None:
-    BUILDFOLDER=args.f
-else:
-    BUILDFOLDER="build_m7"
-if args.v is not None:
-    FVP=args.v
-else:
-    FVP="C:\\Program Files\\ARM\\Development Studio 2019.0\\sw\\models\\bin\\FVP_MPS2_Cortex-M7.exe"
-if args.c:
-    custom = args.c.split()
-else:
-    custom = None
+with open(args.i,"r") as f:
+    config=yaml.safe_load(f)
+#print(config)
+#print(config["IMPLIEDFLAGS"])
-msg("Process test description file")
-subprocess.call(["python", "preprocess.py","-f","desc.txt"])
-msg("Generate all missing C files")
-subprocess.call(["python","processTests.py", "-e"])
+flags = config["FLAGS"]
+onoffFlags = []
+for f in flags:
+    onoffFlags.append(["-D" + f +"=ON","-D" + f +"=OFF"])
+allConfigs=cartesian(*onoffFlags)
+
+if DEBUGMODE:
+    allConfigs=[allConfigs[0]]
+
+failedBuild = {}
+# Test all builds
+folderCreated=False
-msg("Statistics Tests")
-processAndRun(BUILDFOLDER,FVP,"StatsTests",custom=custom)
-msg("Support Tests")
-processAndRun(BUILDFOLDER,FVP,"SupportTests",custom=custom)
-msg("Support Bar Tests F32")
-processAndRun(BUILDFOLDER,FVP,"SupportBarTestsF32",custom=custom)
-msg("Basic Tests")
-processAndRun(BUILDFOLDER,FVP,"BasicTests",custom=custom)
-msg("Interpolation Tests")
-processAndRun(BUILDFOLDER,FVP,"InterpolationTests",custom=custom)
-msg("Complex Tests")
-processAndRun(BUILDFOLDER,FVP,"ComplexTests",custom=custom)
-msg("Fast Maths Tests")
-processAndRun(BUILDFOLDER,FVP,"FastMath",custom=custom)
-msg("SVM Tests")
-processAndRun(BUILDFOLDER,FVP,"SVMTests",custom=custom)
-msg("Bayes Tests")
-processAndRun(BUILDFOLDER,FVP,"BayesTests",custom=custom)
-msg("Distance Tests")
-processAndRun(BUILDFOLDER,FVP,"DistanceTests",custom=custom)
-msg("Filtering Tests")
-processAndRun(BUILDFOLDER,FVP,"FilteringTests",custom=custom)
-msg("Matrix Tests")
-processAndRun(BUILDFOLDER,FVP,"MatrixTests",custom=custom)
-# Too many patterns to run the full transform directly
-msg("Transform Tests CF64")
-processAndRun(BUILDFOLDER,FVP,"TransformCF64",custom=custom)
-msg("Transform Tests RF64")
-processAndRun(BUILDFOLDER,FVP,"TransformRF64",custom=custom)
-msg("Transform Tests CF32")
-processAndRun(BUILDFOLDER,FVP,"TransformCF32",custom=custom)
-msg("Transform Tests RF32")
-processAndRun(BUILDFOLDER,FVP,"TransformRF32",custom=custom)
-msg("Transform Tests CQ31")
-processAndRun(BUILDFOLDER,FVP,"TransformCQ31",custom=custom)
-msg("Transform Tests RQ31")
-processAndRun(BUILDFOLDER,FVP,"TransformRQ31",custom=custom)
-msg("Transform Tests CQ15")
-processAndRun(BUILDFOLDER,FVP,"TransformCQ15",custom=custom)
-msg("Transform Tests RQ15")
-processAndRun(BUILDFOLDER,FVP,"TransformRQ15",custom=custom)
+def logFailedBuild(root,f):
+    with open(os.path.join(fullTestFolder(root),"buildStatus_%d.txt" % args.n),"w") as status:
+        for build in f:
+            s = f[build]
+            if s == MAKEFAILED:
+                status.write("%s : Make failure\n" % build)
+            if s == TESTFAILED:
+                status.write("%s : Test failure\n" % build)
+            if s == FLOWFAILURE:
+                status.write("%s : Flow failure\n" % build)
+            if s == CALLFAILURE:
+                status.write("%s : Subprocess failure\n" % build)
+
+def buildAndTest(compiler):
+    # Run all tests for AC6
+    try:
+        for core in config['CORES']:
+            configNb = 0
+            if compiler in config['CORES'][core]:
+                for flagConfig in allConfigs:
+                    folderCreated = False
+                    configNb = configNb + 1
+                    buildStr = "build_%s_%s_%d" % (compiler,core,configNb)
+                    toUnset = None
+                    if compiler in config['UNSET']:
+                        if core in config['UNSET'][compiler]:
+                            toUnset = config['UNSET'][compiler][core]
+                    build = BuildConfig(toUnset,args.r,
+                        buildStr,
+                        config['COMPILERS'][core][compiler],
+                        config['TOOLCHAINS'][compiler],
+                        config['CORES'][core][compiler],
+                        config["CMAKE"]
+                        )
+                    flags = []
+                    if core in config["IMPLIEDFLAGS"]:
+                        flags += config["IMPLIEDFLAGS"][core]
+                    flags += flagConfig
+                    if compiler in config["IMPLIEDFLAGS"]:
+                        flags += config["IMPLIEDFLAGS"][compiler]
+                    build.createFolder()
+                    # Run all tests for the build
+                    testStatusForThisBuild = NOTESTFAILED
+                    try:
+                        # This is saving the flag configuration
+                        build.createArchive(flags)
+                        build.createCMake(flags)
+                        for test in config["TESTS"]:
+                            msg(test["testName"]+"\n")
+                            testClass=test["testClass"]
+                            test = build.getTest(testClass)
+                            fvp = None
+                            if core in config['FVP']:
+                                fvp = config['FVP'][core]
+                            newTestStatus = test.runAndProcess(compiler,fvp)
+                            testStatusForThisBuild = updateTestStatus(testStatusForThisBuild,newTestStatus)
+                        if testStatusForThisBuild != NOTESTFAILED:
+                            failedBuild[buildStr] = testStatusForThisBuild
+                            # Final script status
+                            testFailed = 1
+                        build.archiveResults()
+                    finally:
+                        build.cleanFolder()
+            else:
+                msg("No toolchain %s for core %s" % (compiler,core))
+    except TestFlowFailure as flow:
+        errorMsg("Error flow id %d\n" % flow.errorCode())
+        failedBuild[buildStr] = FLOWFAILURE
+        logFailedBuild(args.r,failedBuild)
+        sys.exit(1)
+    except CallFailure:
+        errorMsg("Call failure\n")
+        failedBuild[buildStr] = CALLFAILURE
+        logFailedBuild(args.r,failedBuild)
+        sys.exit(1)
+
+############## Builds for all toolchains
+
+if not DEBUGMODE:
+    preprocess()
+    generateAllCCode()
+
+for t in config["TOOLCHAINS"]:
+    msg("Testing toolchain %s\n" % t)
+    buildAndTest(t)
+
+logFailedBuild(args.r,failedBuild)
+sys.exit(testFailed)

@@ -107,7 +107,7 @@ def extractBenchmarks(resultPath,benchmark,elem):
parser = argparse.ArgumentParser(description='Generate summary benchmarks')
-parser.add_argument('-f', nargs='?',type = str, default="Output.pickle", help="Test description file path")
+parser.add_argument('-f', nargs='?',type = str, default="Output.pickle", help="Test description cache")
parser.add_argument('-b', nargs='?',type = str, default="FullBenchmark", help="Full Benchmark dir path")
# Needed to find the currentConfig.csv and know the headers
parser.add_argument('-r', nargs='?',type = str, default=None, help="Result file path")

@@ -52,7 +52,8 @@ int testmain()
  // An IO runner is driven by some IO
  // In future one may have a client/server runner driven
  // by a server running on a host.
- Client::IORunner runner(&io,&mgr,Testing::kTestAndDump);
+ //Client::IORunner runner(&io,&mgr,Testing::kTestAndDump);
+ Client::IORunner runner(&io,&mgr,Testing::kTestOnly);
  // Root object containing all the tests

@@ -34,6 +34,11 @@ function(compilerSpecificCompileOptions PROJECTNAME ROOT)
# Core specific config
+if (ARM_CPU STREQUAL "cortex-m55" )
+    target_compile_options(${PROJECTNAME} PUBLIC "-fshort-enums")
+    target_compile_options(${PROJECTNAME} PUBLIC "-fshort-wchar")
+endif()
+
if (ARM_CPU STREQUAL "cortex-m33" )
    target_compile_options(${PROJECTNAME} PUBLIC "-mfpu=fpv5-sp-d16")
endif()

@@ -54,9 +54,11 @@ function(configboot PROJECT_NAME ROOT PLATFORMFOLDER)
#
# Cortex M
#
-if (ARM_CPU MATCHES "^[cC]ortex-[Mm].*$")
+# C startup for M55 boot code
+if (ARM_CPU MATCHES "^[cC]ortex-[mM]55([^0-9].*)?$")
+    cortexm(${CORE} ${PROJECT_NAME} ${ROOT} ${PLATFORMFOLDER} ON)
+elseif (ARM_CPU MATCHES "^[cC]ortex-[Mm].*$")
    cortexm(${CORE} ${PROJECT_NAME} ${ROOT} ${PLATFORMFOLDER} OFF)
endif()

@@ -86,6 +86,16 @@ function(configcore PROJECTNAME ROOT)
# CORTEX-M
#
+if (ARM_CPU MATCHES "^[cC]ortex-[mM]55([^0-9].*)?$")
+    target_include_directories(${PROJECTNAME} PUBLIC "${ROOT}/CMSIS/Core/Include")
+    target_compile_definitions(${PROJECTNAME} PUBLIC CORTEXM)
+    target_compile_definitions(${PROJECTNAME} PRIVATE ARMv81MML_DSP_DP_MVE_FP)
+    SET(HARDFP ON)
+    SET(LITTLEENDIAN ON)
+    SET(COREID ARMv81MML_DSP_DP_MVE_FP PARENT_SCOPE)
+endif()
+
# CORTEX-M35
if (ARM_CPU MATCHES "^[cC]ortex-[mM]35([^0-9].*)?$")
    target_include_directories(${PROJECTNAME} PUBLIC "${ROOT}/CMSIS/Core/Include")

@@ -8,7 +8,7 @@ endif()
include(Toolchain/Tools)
-option(OPTIMIZED "Compile for speed" ON)
+option(OPTIMIZED "Compile for speed" OFF)
option(AUTOVECTORIZE "Prefer autovectorizable code to one using C intrinsics" OFF)
enable_language(CXX C ASM)

@@ -20,6 +20,7 @@ endif()
SET(CORE ARMCM7)
include(platform)
function(set_platform_core)
@@ -62,6 +63,14 @@ function(set_platform_core)
endif()
+###################
+#
+# Cortex cortex-m55
+#
+if (ARM_CPU MATCHES "^[cC]ortex-[mM]55([^0-9].*)?$")
+    SET(CORE ARMv81MML PARENT_SCOPE)
+endif()
+
###################
#
# Cortex cortex-m23

@@ -35,8 +35,8 @@ SET(CMAKE_CXX_OUTPUT_EXTENSION .o)
SET(CMAKE_ASM_OUTPUT_EXTENSION .o)
# When the library is defined as STATIC, this line is needed to describe how the .a file must be
# created. Some changes to the line may be needed.
-SET(CMAKE_C_CREATE_STATIC_LIBRARY "<CMAKE_AR> -r -s <TARGET> <LINK_FLAGS> <OBJECTS>" )
+SET(CMAKE_C_CREATE_STATIC_LIBRARY "<CMAKE_AR> -crs <TARGET> <LINK_FLAGS> <OBJECTS>" )
-SET(CMAKE_CXX_CREATE_STATIC_LIBRARY "<CMAKE_AR> -r -s <TARGET> <LINK_FLAGS> <OBJECTS>" )
+SET(CMAKE_CXX_CREATE_STATIC_LIBRARY "<CMAKE_AR> -crs <TARGET> <LINK_FLAGS> <OBJECTS>" )
set(GCC ON)
# default core
