Added API descriptions for code generation of the ComputeGraph

Previous version was only detailing the options.
pull/94/head
Christophe Favergeon 3 years ago
parent 175545244d
commit f3a667f892

@ -45,7 +45,7 @@ public:
status=arm_cfft_init_f32(&sfft,inputSize>>1);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -57,7 +57,7 @@ public:
return(0);
};
int run() override
int run() final
{
float32_t *a=this->getReadBuffer();
float32_t *b=this->getWriteBuffer();
@ -85,7 +85,7 @@ public:
status=arm_cfft_init_f16(&sfft,inputSize>>1);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -97,7 +97,7 @@ public:
return(0);
};
int run() override
int run() final
{
float16_t *a=this->getReadBuffer();
float16_t *b=this->getWriteBuffer();
@ -124,7 +124,7 @@ public:
status=arm_cfft_init_q15(&sfft,inputSize>>1);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -136,7 +136,7 @@ public:
return(0);
};
int run() override
int run() final
{
q15_t *a=this->getReadBuffer();
q15_t *b=this->getWriteBuffer();

@ -45,7 +45,7 @@ public:
status=arm_cfft_init_f32(&sifft,inputSize>>1);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -57,7 +57,7 @@ public:
return(0);
};
int run() override
int run() final
{
float32_t *a=this->getReadBuffer();
float32_t *b=this->getWriteBuffer();
@ -85,7 +85,7 @@ public:
status=arm_cfft_init_f16(&sifft,inputSize>>1);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -97,7 +97,7 @@ public:
return(0);
};
int run() override
int run() final
{
float16_t *a=this->getReadBuffer();
float16_t *b=this->getWriteBuffer();
@ -125,7 +125,7 @@ public:
status=arm_cfft_init_q15(&sifft,inputSize>>1);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -137,7 +137,7 @@ public:
return(0);
};
int run() override
int run() final
{
q15_t *a=this->getReadBuffer();
q15_t *b=this->getWriteBuffer();

@ -40,7 +40,7 @@ public:
InterleavedStereoToMono(FIFOBase<q15_t> &src,FIFOBase<q15_t> &dst):
GenericNode<q15_t,inputSize,q15_t,outputSize>(src,dst){};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -52,7 +52,7 @@ public:
return(0);
};
int run() override
int run() final
{
q15_t *a=this->getReadBuffer();
q15_t *b=this->getWriteBuffer();
@ -72,7 +72,7 @@ public:
InterleavedStereoToMono(FIFOBase<q31_t> &src,FIFOBase<q31_t> &dst):
GenericNode<q31_t,inputSize,q31_t,outputSize>(src,dst){};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -84,7 +84,7 @@ public:
return(0);
};
int run() override
int run() final
{
q31_t *a=this->getReadBuffer();
q31_t *b=this->getWriteBuffer();
@ -104,7 +104,7 @@ public:
InterleavedStereoToMono(FIFOBase<float32_t> &src,FIFOBase<float32_t> &dst):
GenericNode<float32_t,inputSize,float32_t,outputSize>(src,dst){};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -116,7 +116,7 @@ public:
return(0);
};
int run() override
int run() final
{
float32_t *a=this->getReadBuffer();
float32_t *b=this->getWriteBuffer();

@ -58,7 +58,7 @@ public:
#endif
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -70,7 +70,7 @@ public:
return(0);
};
int run() override
int run() final
{
float32_t *a=this->getReadBuffer();
float32_t *b=this->getWriteBuffer();
@ -101,7 +101,7 @@ public:
#endif
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -113,7 +113,7 @@ public:
return(0);
};
int run() override
int run() final
{
float16_t *a=this->getReadBuffer();
float16_t *b=this->getWriteBuffer();
@ -140,7 +140,7 @@ public:
memory.resize(2*inputSize);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -152,7 +152,7 @@ public:
return(0);
};
int run() override
int run() final
{
q31_t *a=this->getReadBuffer();
q31_t *b=this->getWriteBuffer();
@ -178,7 +178,7 @@ public:
memory.resize(2*inputSize);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -190,7 +190,7 @@ public:
return(0);
};
int run() override
int run() final
{
q15_t *a=this->getReadBuffer();
q15_t *b=this->getWriteBuffer();

@ -35,7 +35,7 @@ class NullSink: public GenericSink<IN, inputSize>
public:
NullSink(FIFOBase<IN> &src):GenericSink<IN,inputSize>(src){};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willUnderflow()
)
@ -46,7 +46,7 @@ public:
return(0);
};
int run() override
int run() final
{
IN *b=this->getReadBuffer();

@ -40,7 +40,7 @@ public:
memory.resize(overlap);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -52,7 +52,7 @@ public:
return(0);
};
int run() override
int run() final
{
int i;
IN *a=this->getReadBuffer();

@ -40,7 +40,7 @@ public:
memory.resize(overlap);
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -52,7 +52,7 @@ public:
return(0);
};
int run() override
int run() final
{
IN *a=this->getReadBuffer();
IN *b=this->getWriteBuffer();

@ -44,7 +44,7 @@ public:
ToComplex(FIFOBase<IN> &src,FIFOBase<IN> &dst):GenericNode<IN,inputSize,IN,outputSize>(src,dst){
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -56,7 +56,7 @@ public:
return(0);
};
int run() override
int run() final
{
IN *a=this->getReadBuffer();
IN *b=this->getWriteBuffer();

@ -43,7 +43,7 @@ public:
ToReal(FIFOBase<IN> &src,FIFOBase<IN> &dst):GenericNode<IN,inputSize,IN,outputSize>(src,dst){
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow()
@ -55,7 +55,7 @@ public:
return(0);
};
int run() override
int run() final
{
IN *a=this->getReadBuffer();
IN *b=this->getWriteBuffer();

@ -46,7 +46,7 @@ public:
Unzip(FIFOBase<IN> &src,FIFOBase<IN> &dst1,FIFOBase<IN> &dst2):
GenericNode12<IN,inputSize,IN,output1Size,IN,output2Size>(src,dst1,dst2){};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow1() ||
this->willOverflow2() ||
@ -62,7 +62,7 @@ public:
/*
2*outputSize1 == 2*outSize2 == inputSize
*/
int run() override
int run() final
{
IN *a=this->getReadBuffer();
IN *b1=this->getWriteBuffer1();

@ -39,7 +39,7 @@ public:
Zip(FIFOBase<IN> &src1,FIFOBase<IN> &src2,FIFOBase<IN> &dst):
GenericNode21<IN,inputSize,IN,inputSize,IN,outputSize>(src1,src2,dst){};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow() ||
this->willUnderflow1() ||
@ -52,7 +52,7 @@ public:
return(0);
};
int run() override
int run() final
{
IN *a1=this->getReadBuffer1();
IN *a2=this->getReadBuffer2();

@ -35,7 +35,7 @@ class FileSink: public GenericSink<IN, inputSize>
public:
FileSink(FIFOBase<IN> &src, std::string name):GenericSink<IN,inputSize>(src),output(name){};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willUnderflow()
)
@ -46,7 +46,7 @@ public:
return(0);
};
int run() override
int run() final
{
IN *b=this->getReadBuffer();

@ -46,7 +46,7 @@ public:
};
int prepareForRunning() override
int prepareForRunning() final
{
if (this->willOverflow()
)
@ -57,7 +57,7 @@ public:
return(0);
};
int run() override
int run() final
{
string str;
int i;

@ -1,6 +1,21 @@
# Options for C Code Generation
# C Code generation
## cOptionalArgs (default = "")
## API
```python
def ccode(self,directory,config=Configuration())
```
It is a method of the `Schedule` object returned by `computeSchedule`.
It generates C++ code implementing the static schedule.
* `directory` : The directory where the C++ files are generated
* `config` : An optional configuration object
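For illustration, here is a minimal usage sketch. It assumes a graph `g` built as described in the scheduling documentation, and that the `generated` directory (the folder name used in the examples of this documentation) already exists:
```python
# Minimal sketch: 'g' is a Graph object described elsewhere
conf = Configuration()
conf.cOptionalArgs = ["int someVariable"]   # optional extra argument for the scheduler API

sched = g.computeSchedule(config=conf)

# Writes scheduler.cpp and scheduler.h (default names) into the "generated" folder
sched.ccode("generated", config=conf)
```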
## Options for C Code Generation
### cOptionalArgs (default = "")
Optional arguments to pass to the C API of the scheduler function
@ -12,7 +27,7 @@ For instance:
conf.cOptionalArgs=["int someVariable"]
```
## codeArray (default = True)
### codeArray (default = True)
When true, the scheduling is defined as an array. Otherwise, a list of function calls is generated.
@ -20,13 +35,13 @@ A list of function call may be easier to read but if the schedule is long, it is
When `codeArray` is True, the option `switchCase` can also be used.
## switchCase (default = True)
### switchCase (default = True)
`codeArray` must be true or this option is ignored.
When the schedule is encoded as an array, it can either be an array of function pointers (`switchCase` false) or an array of indexes for a state machine (`switchCase` true)
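As a sketch, the two options combine as follows (`conf` is the same `Configuration` object used in the other examples of this documentation):
```python
conf = Configuration()

# Default: the schedule is an array of indexes driving a state machine
conf.codeArray = True
conf.switchCase = True

# Array of function pointers instead of a state machine
# conf.switchCase = False

# Plain list of function calls (switchCase is then ignored)
# conf.codeArray = False
```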
## eventRecorder (default = False)
### eventRecorder (default = False)
Enable the generation of `CMSIS EventRecorder` instrumentation in the code. The CMSIS-DSP Pack provides definitions for 3 events:
@ -34,37 +49,37 @@ Enable the generation of `CMSIS EventRecorder` intrumentation in the code. The C
* Node execution
* Error
## customCName (default = "custom.h")
### customCName (default = "custom.h")
Name of custom header in generated C code. If you use several schedulers, you may want to use different headers for each one.
## postCustomCName (default = "")
### postCustomCName (default = "")
Name of custom header in generated C code coming after all of the other includes. By default none is used.
## genericNodeCName (default = "GenericNodes.h")
### genericNodeCName (default = "GenericNodes.h")
Name of GenericNodes header in generated C code. If you use several schedulers, you may want to use different headers for each one.
## appNodesCName (default = "AppNodes.h")
### appNodesCName (default = "AppNodes.h")
Name of AppNodes header in generated C code. If you use several schedulers, you may want to use different headers for each one.
## schedulerCFileName (default = "scheduler")
### schedulerCFileName (default = "scheduler")
Name of the scheduler `cpp` file and header in generated C code. If you use several schedulers, you may want to use different names for each one.
If the option is set to `xxx`, the names generated will be `xxx.cpp` and `xxx.h`
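When two schedulers are generated for the same application, the naming options above can be combined; a sketch (the file names below are only examples):
```python
confA = Configuration()
confA.schedulerCFileName = "schedulerA"   # generates schedulerA.cpp / schedulerA.h
confA.customCName = "customA.h"
confA.appNodesCName = "AppNodesA.h"

confB = Configuration()
confB.schedulerCFileName = "schedulerB"   # generates schedulerB.cpp / schedulerB.h
confB.customCName = "customB.h"
confB.appNodesCName = "AppNodesB.h"
```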
## CAPI (default = True)
### CAPI (default = True)
By default, the scheduler function is callable from C. When false, it is a standard C++ API.
## CMSISDSP (default = True)
### CMSISDSP (default = True)
If you don't use any of the datatypes or functions of the CMSIS-DSP, you don't need to include the `arm_math.h` in the scheduler file. This option can thus be set to `False`.
## asynchronous (default = False)
### asynchronous (default = False)
When true, the scheduling is for a dynamic / asynchronous flow. A node may not always produce or consume the same amount of data. As a consequence, the scheduling can fail. Each node needs to implement a `prepareForRunning` function to identify and recover from FIFO underflows and overflows.
@ -76,7 +91,7 @@ Synchronous FIFOs that are just buffers will be considered as FIFOs in asynchron
More information is available in the documentation for [this mode](../Async.md).
## FIFOIncrease (default 0)
### FIFOIncrease (default 0)
In case of dynamic / asynchronous scheduling, the FIFOs may need to be bigger than what is computed assuming a static / synchronous scheduling. This option is used to increase the FIFO size. It represents a percent increase.
@ -84,7 +99,7 @@ For instance, a value of `10` means the FIFO will have their size updated from `
If the value is a `float` instead of an `int`, it is used directly as a scale factor. For instance, `1.1` would scale the sizes by `1.1` and be equivalent to the setting `10` (for 10 percent).
## asyncDefaultSkip (default True)
### asyncDefaultSkip (default True)
Behavior of a pure function (like a CMSIS-DSP function) in asynchronous mode. When `True`, the execution is skipped if the function can't be executed. When `False`, an error is raised.
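A possible configuration for the dynamic / asynchronous mode, combining the three options above (the values are only examples):
```python
conf = Configuration()
conf.asynchronous = True       # dynamic / asynchronous scheduling
conf.FIFOIncrease = 10         # FIFO sizes increased by 10 percent
# conf.FIFOIncrease = 1.1      # equivalent: a float is used directly as a scale factor
conf.asyncDefaultSkip = False  # raise an error instead of skipping a pure function
```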

@ -1,4 +1,4 @@
# Options for the code generator
# Common options for the code generators
Global options for the code generators. There are specific options for the C, Python and Graphviz generators. They are described in different parts of the documentation.

@ -1,11 +1,24 @@
# Options for the graphviz generator
# Graphviz generation
## horizontal (default = True)
## API
```python
def graphviz(self,f,config=Configuration())
```
It is a method of the `Schedule` object returned by `computeSchedule`.
* `f` : An already opened file where the graphviz description is written
* `config` : An optional configuration object
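A minimal usage sketch (the file name `simple.dot` matches the example used later in this documentation; `sched` is the `Schedule` object returned by `computeSchedule`, and the two options are described in the next subsection):
```python
conf = Configuration()
conf.horizontal = True        # default: horizontal layout
conf.displayFIFOBuf = False   # default: show FIFO sizes rather than variable names

with open("simple.dot", "w") as f:
    sched.graphviz(f, config=conf)

# Then, if graphviz is installed:
#   dot -Tpng -o simple.png simple.dot
```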
## Options for the graphviz generator
### horizontal (default = True)
Horizontal or vertical layout for the graph.
## displayFIFOBuf (default = False)
### displayFIFOBuf (default = False)
By default, the graph displays the FIFO sizes computed as a result of the scheduling. If you want to know the FIFO variable names used in the code, you can set this option to true and the graph will display the FIFO variable names instead.

@ -8,15 +8,17 @@ Python APIs to describe the nodes and graph and generate the C++, Python or Grap
3. ## Scheduler
1. ### [Scheduler options](SchedOptions.md)
1. ### [Schedule computation](SchedOptions.md)
2. ### [Code generation](CodegenOptions.md)
2. ### Code generation
1. #### [C Code generation](CCodeGen.md)
1. #### [C++ Code generation](CCodeGen.md)
2. #### [Python code generation](PythonGen.md)
3. ### [Graphviz representation](GraphvizGen.md)
3. #### [Graphviz representation](GraphvizGen.md)
4. #### [Common options](CodegenOptions.md)

@ -1,18 +1,33 @@
# Options for Python code generation
# Python code generation
## pyOptionalArgs (default = "")
## API
```python
def pythoncode(self,directory,config=Configuration())
```
It is a method of the `Schedule` object returned by `computeSchedule`.
It generates Python code implementing the static schedule.
* `directory` : The directory where the Python files are generated
* `config` : An optional configuration object
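A minimal usage sketch, parallel to the C++ generation (the directory name is only an example; with the default `schedulerPythonFileName`, the generated file is presumably `sched.py`):
```python
conf = Configuration()
conf.schedulerPythonFileName = "sched"   # default name, shown here for illustration

sched.pythoncode("generated_py", config=conf)
```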
## Options for Python code generation
### pyOptionalArgs (default = "")
Optional arguments to pass to the Python version of the scheduler function
## customPythonName (default = "custom")
### customPythonName (default = "custom")
Name of custom header in generated Python code. If you use several schedulers, you may want to use different headers for each one.
## appNodesPythonName (default = "appnodes")
### appNodesPythonName (default = "appnodes")
Name of AppNodes header in generated Python code. If you use several schedulers, you may want to use different headers for each one.
## schedulerPythonFileName (default = "sched")
### schedulerPythonFileName (default = "sched")
Name of scheduler file in generated Python code. If you use several schedulers, you may want to use different names for each one.

@ -1,4 +1,22 @@
# Options for the scheduling
# Schedule computation
## API
```python
def computeSchedule(self,config=Configuration()):
```
This is a method on the `Graph` object. It can take an optional `Configuration` object.
It returns a `Schedule` object. This object contains:
* A description of the static schedule
* The computed size of the FIFOs
* The FIFOs
* The buffers for the FIFOs (with sharing when possible if memory optimizations were enabled)
* A rewritten graph with `Duplicate` nodes inserted
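Putting it together, a minimal sketch of the scheduling step (assuming the graph `g` has already been described; the two options set here are explained below):
```python
conf = Configuration()
conf.memoryOptimization = True   # try to share buffers between FIFOs used as plain arrays
conf.dumpSchedule = True         # print a human readable schedule on stdout

sched = g.computeSchedule(config=conf)
```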
## Options for the scheduling
Those options need to be set on a configuration object passed as argument to the scheduling function. For instance:
@ -10,22 +28,22 @@ sched = g.computeSchedule(config = conf)
Note that the configuration object also contains options for the code generators. They are described in different parts of the documentation.
## memoryOptimization (default = False)
### memoryOptimization (default = False)
When the amount of data written to a FIFO and read from the FIFO is the same, the FIFO is just an array. In this case, depending on the scheduling, the memory used by different arrays may be reused if those arrays are not needed at the same time.
This option enables an analysis that optimizes the memory usage by merging some buffers when possible.
## sinkPriority (default = True)
### sinkPriority (default = True)
Try to prioritize the scheduling of the sinks to minimize the latency between sources and sinks.
When this option is enabled, the tool may not be able to find a schedule in all cases. If it can't find a schedule, it will raise a `DeadLock` exception.
## displayFIFOSizes (default = False)
### displayFIFOSizes (default = False)
During computation of the schedule, the evolution of the FIFO sizes is printed on `stdout`.
## dumpSchedule (default = False)
### dumpSchedule (default = False)
During computation of the schedule, the human-readable schedule is printed on `stdout`.

@ -40,7 +40,7 @@ This will generate the following files:
* `generated/scheduler.h`
* `simple.dot` (the graphviz representation of the graph)
A graphical representation of the graph is generated in graphviz dot format. If you have graphviz installed, you can generated a `png` file representing the graph with:
A graphical representation of the graph is generated in graphviz dot format. If you have graphviz installed, you can generate a `png` file representing the graph with:
`dot -Tpng -o simple.png simple.dot`
@ -297,7 +297,7 @@ class Sink(GenericSink):
For each node datatype defined in the Python side, we need to provide an implementation on the C++ side.
The C++ class templates that we will define are just wrappers around algorithms. In this example, since the algorithms are very simple, they have been implemented directly in the wrappers. It does not have to be the case for a more complex algorithms. The C++ template are serving the same purposes as the Python definitions : defining the datatype of a node:
The C++ class templates that we will define are just wrappers around algorithms. In this example, since the algorithms are very simple, they have been implemented directly in the wrappers. It does not have to be the case for more complex algorithms. The C++ templates serve the same purposes as the Python definitions: defining the datatype of a node.
* The number of IOs
* Their datatype
@ -306,13 +306,13 @@ The C++ class templates that we will define are just wrappers around algorithms.
The C++ template also provides some entry points to enable the scheduler to do its work:
* Access to the FIFOs
* Running of the code
* Running the code
Those C++ templates should thus be very light and that's why we prefer to speak of C++ wrappers rather than C++ objects. The code for the algorithms will generally be outside of those wrappers (and will often be in C).
Those templates are defined in a file `AppNodes.h` included by the scheduler (it is possible to change the name from the Python script). This file must be provided by the user of the ComputeGraph framework.
### The source C++ wrapper
### The C++ wrapper for Source
First, like with Python, we need to define the datatype:
@ -407,12 +407,13 @@ So, although we have not provided a specific implementation of the template, thi
The return value of the function `run` informs the scheduler whether an error occurred. In synchronous mode, errors (like underflow or overflow) cannot be caused by the scheduling itself but only by broken real time. So any error returned by a node will stop the scheduling.
### The processing node
### The C++ wrapper for the Processing node
It is similar but now we have one input and one output. The template is:
```C++
template<typename IN, int inputSize,typename OUT,int outputSize>
template<typename IN, int inputSize,
typename OUT,int outputSize>
class ProcessingNode;
```
@ -425,7 +426,8 @@ Here is how we implement a specialized version of the template.
First we define the arguments of the template. It is no longer generic. We have to give all the arguments:
```C++
class ProcessingNode<IN,inputOutputSize,IN,inputOutputSize>
class ProcessingNode<IN,inputOutputSize,
IN,inputOutputSize>
```
This enforces that the `OUT` datatype is equal to the `IN` datatype since `IN` is used in both arguments.
@ -436,26 +438,31 @@ Since the arguments of the template are still not fully specified and there is s
```C++
template<typename IN, int inputOutputSize>
class ProcessingNode<IN,inputOutputSize,IN,inputOutputSize>
class ProcessingNode<IN,inputOutputSize,
IN,inputOutputSize>
```
And finally, like before, we inherit from `GenericNode` using the same template arguments:
```C++
template<typename IN, int inputOutputSize>
class ProcessingNode<IN,inputOutputSize,IN,inputOutputSize>:
public GenericNode<IN,inputOutputSize,IN,inputOutputSize>
class ProcessingNode<IN,inputOutputSize,
IN,inputOutputSize>:
public GenericNode<IN,inputOutputSize,
IN,inputOutputSize>
```
To be compared with the generic implementation:
```C++
template<typename IN, int inputSize, typename OUT, int outputSize>
template<typename IN, int inputSize,
typename OUT, int outputSize>
class ProcessingNode:
public GenericNode<IN,inputSize,OUT,outputSize>
public GenericNode<IN,inputSize,
OUT,outputSize>
```
In the generic implementation we do not use `<>` after `ProcessingNode` since we do not specify specific values of the template arguments.
In a generic implementation, we do not use `<>` after `ProcessingNode` since we do not specify specific values of the template arguments.
It is possible to have several specializations of the same class.
@ -463,15 +470,17 @@ One could also have another specialization like:
```C++
template<int inputOutputSize>
class ProcessingNode<q15_t,inputOutputSize,q15_t,inputOutputSize>:
public GenericNode<q15_tIN,inputOutputSize,q15_t,inputOutputSize>
class ProcessingNode<q15_t,inputOutputSize,
q15_t,inputOutputSize>:
public GenericNode<q15_t,inputOutputSize,
q15_t,inputOutputSize>
```
This one works only with the `q15_t` datatype.
The `run` function of the processing node has access to `getReadBuffer` and `getWriteBuffer` to access the FIFO buffers.
### The sink
### The C++ wrapper for the Sink
The definition of the `Sink` should be clear now:
