support Cortex-M + ESP, support [email protected] on Cortex-M, support [email protected] on ESP32
eloquentarduino committed Oct 31, 2021
1 parent 1cfd8b6 commit 5fb8f3f
Showing 509 changed files with 102,741 additions and 4,338 deletions.
Binary file modified .DS_Store
Binary file not shown.
23 changes: 20 additions & 3 deletions README.md
@@ -14,6 +14,14 @@ Clone this repo in your Arduino libraries folder.
git clone https://github.com/eloquentarduino/EloquentTinyML.git
```

## Export TensorFlow Lite model

To run a model on your microcontroller, you first need the model itself, exported in a format this library can load.

I suggest you use [`tinymlgen`](https://github.com/eloquentarduino/tinymlgen) to complete this step:
it will export your TensorFlow Lite model to a C array ready to be loaded
by this library.
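For reference, tinymlgen's output is just a C header holding the model's flatbuffer as a byte array. A minimal sketch of what it looks like (the `sine_model` name and the bytes here are placeholders, not the real file):

```
// sine_model.h - sketch of a tinymlgen export (placeholder name and bytes;
// the real file contains the full TensorFlow Lite flatbuffer)
const unsigned char sine_model[] = {
    0x1c, 0x00, 0x00, 0x00, 0x54, 0x46, 0x4c, 0x33, // "TFL3" magic
    // ... thousands more bytes ...
};
const unsigned int sine_model_len = 2640;
```

You then `#include` this header in your sketch and pass the array to `ml.begin()`.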


## Use

@@ -25,15 +33,15 @@ git clone https://github.com/eloquentarduino/EloquentTinyML.git
#define NUMBER_OF_OUTPUTS 1
#define TENSOR_ARENA_SIZE 2*1024

Eloquent::TinyML::TinyML<
Eloquent::TinyML::TfLite<
NUMBER_OF_INPUTS,
NUMBER_OF_OUTPUTS,
TENSOR_ARENA_SIZE> ml;


void setup() {
Serial.begin(115200);
ml.begin(sine_model_quantized_tflite);
ml.begin(sine_model);
}

void loop() {
@@ -50,4 +58,13 @@ void loop() {
Serial.println(predicted);
delay(1000);
}
```
```
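If `begin()` fails (most often because `TENSOR_ARENA_SIZE` is too small for the model), you can inspect what went wrong. A sketch, assuming the 2.4.0 refactor keeps the `initialized()` and `errorMessage()` helpers visible in the pre-refactor source later in this commit:

```
void setup() {
    Serial.begin(115200);
    ml.begin(sine_model);

    // AllocateTensors() fails when the arena is too small:
    // increase TENSOR_ARENA_SIZE and re-upload
    if (!ml.initialized()) {
        Serial.print("Setup failed: ");
        Serial.println(ml.errorMessage());
    }
}
```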

## Compatibility

The latest version of this library (2.4.0) is compatible with Cortex-M and ESP32 chips and is built on top of:

- [Arduino_TensorFlowLite library version 2.4.0-ALPHA](https://www.tensorflow.org/lite/microcontrollers/overview)
- [TensorFlowLite_ESP32 version 0.9.0](https://github.com/tanakamasayuki/Arduino_TensorFlowLite_ESP32)

ESP32 support is stuck at TensorFlow 2.1.1 at the moment.
28 changes: 15 additions & 13 deletions examples/SineExample/SineExample.ino
@@ -4,7 +4,7 @@

#define NUMBER_OF_INPUTS 1
#define NUMBER_OF_OUTPUTS 1
// in future projects you may need to tweek this value: it's a trial and error process
// in future projects you may need to tweak this value: it's a trial and error process
#define TENSOR_ARENA_SIZE 2*1024

Eloquent::TinyML::TfLite<NUMBER_OF_INPUTS, NUMBER_OF_OUTPUTS, TENSOR_ARENA_SIZE> ml;
@@ -16,17 +16,19 @@ void setup() {
}

void loop() {
    // pick up a random x and predict its sine
    float x = 3.14 * random(100) / 100;
    float y = sin(x);
    float input[1] = { x };
    float predicted = ml.predict(input);
    for (float i = 0; i < 10; i++) {
        // pick x from 0 to PI
        float x = 3.14 * i / 10;
        float y = sin(x);
        float input[1] = { x };
        float predicted = ml.predict(input);

    Serial.print("sin(");
    Serial.print(x);
    Serial.print(") = ");
    Serial.print(y);
    Serial.print("\t predicted: ");
    Serial.println(predicted);
    delay(1000);
        Serial.print("sin(");
        Serial.print(x);
        Serial.print(") = ");
        Serial.print(y);
        Serial.print("\t predicted: ");
        Serial.println(predicted);
        delay(1000);
    }
}
2 changes: 1 addition & 1 deletion library.json
@@ -6,7 +6,7 @@
"type": "git",
"url": "https://github.com/eloquentarduino/EloquentTinyML"
},
"version": "0.0.10",
"version": "2.4.0",
"authors": {
"name": "Simone Salerno",
"url": "https://github.com/eloquentarduino"
2 changes: 1 addition & 1 deletion library.properties
@@ -1,5 +1,5 @@
name=EloquentTinyML
version=0.0.10
version=2.4.0
author=Simone Salerno,[email protected]
maintainer=Simone Salerno,[email protected]
sentence=An eloquent interface to Tensorflow Lite for Microcontrollers
Binary file modified src/.DS_Store
Binary file not shown.
229 changes: 7 additions & 222 deletions src/EloquentTinyML.h
@@ -1,237 +1,22 @@
#pragma once

#include <Arduino.h>
#include <math.h>

#ifdef max
#define REDEFINE_MAX
#undef max
#undef min
#endif

#include <math.h>
#include "tensorflow/lite/version.h"
#include "tensorflow/lite/micro/kernels/all_ops_resolver.h"
#include "tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow/lite/micro/micro_interpreter.h"

#if defined(ESP32)
#include "TfLiteESP32.h"
#else
#include "TfLiteARM.h"
#endif

#ifdef REDEFINE_MAX
#define max(a,b) ((a)>(b)?(a):(b))
#define min(a,b) ((a)<(b)?(a):(b))
#endif


namespace Eloquent {
namespace TinyML {

enum TfLiteError {
OK,
VERSION_MISMATCH,
CANNOT_ALLOCATE_TENSORS,
NOT_INITIALIZED,
INVOKE_ERROR
};

/**
* Eloquent interface to Tensorflow Lite for Microcontrollers
*
* @tparam inputSize
* @tparam outputSize
* @tparam tensorArenaSize how much memory to allocate to the tensors
*/
template<size_t inputSize, size_t outputSize, size_t tensorArenaSize>
class TfLite {
public:
/**
* Constructor
*/
TfLite() :
failed(false) {
}

~TfLite() {
delete reporter;
delete interpreter;
}

/**
* Initialize the network
*
* @param modelData
* @return
*/
bool begin(const unsigned char *modelData) {
tflite::ops::micro::AllOpsResolver resolver;
reporter = new tflite::MicroErrorReporter();

model = tflite::GetModel(modelData);

// assert model version and runtime version match
if (model->version() != TFLITE_SCHEMA_VERSION) {
failed = true;
error = VERSION_MISMATCH;

reporter->Report(
"Model provided is schema version %d not equal "
"to supported version %d.",
model->version(), TFLITE_SCHEMA_VERSION);

return false;
}

interpreter = new tflite::MicroInterpreter(model, resolver, tensorArena, tensorArenaSize, reporter);

if (interpreter->AllocateTensors() != kTfLiteOk) {
failed = true;
error = CANNOT_ALLOCATE_TENSORS;

return false;
}

input = interpreter->input(0);
output = interpreter->output(0);
error = OK;

return true;
}

/**
* Test if the initialization completed fine
*/
bool initialized() {
return !failed;
}

/**
*
* @param input
* @param output
* @return
*/
uint8_t predict(uint8_t *input, uint8_t *output = NULL) {
// abort if initialization failed
if (!initialized())
return sqrt(-1);

memcpy(this->input->data.uint8, input, sizeof(uint8_t) * inputSize);

if (interpreter->Invoke() != kTfLiteOk) {
reporter->Report("Inference failed");

return sqrt(-1);
}

// copy output
if (output != NULL) {
for (uint16_t i = 0; i < outputSize; i++)
output[i] = this->output->data.uint8[i];
}

return this->output->data.uint8[0];
}

/**
* Run inference
* @return output[0], so you can use it directly if it's the only output
*/
float predict(float *input, float *output = NULL) {
// abort if initialization failed
if (!initialized()) {
error = NOT_INITIALIZED;

return sqrt(-1);
}

// copy input
for (size_t i = 0; i < inputSize; i++)
this->input->data.f[i] = input[i];

if (interpreter->Invoke() != kTfLiteOk) {
error = INVOKE_ERROR;
reporter->Report("Inference failed");

return sqrt(-1);
}

// copy output
if (output != NULL) {
for (uint16_t i = 0; i < outputSize; i++)
output[i] = this->output->data.f[i];
}

return this->output->data.f[0];
}

/**
* Predict class
* @param input
* @return
*/
uint8_t predictClass(float *input) {
float output[outputSize];

predict(input, output);

return probaToClass(output);
}

/**
* Get class with highest probability
* @param output
* @return
*/
uint8_t probaToClass(float *output) {
uint8_t classIdx = 0;
float maxProba = output[0];

for (uint8_t i = 1; i < outputSize; i++) {
if (output[i] > maxProba) {
classIdx = i;
maxProba = output[i];
}
}

return classIdx;
}

/**
* Get error
* @return
*/
TfLiteError getError() {
return error;
}

/**
* Get error message
* @return
*/
const char* errorMessage() {
switch (error) {
case OK:
return "No error";
case VERSION_MISMATCH:
return "Version mismatch";
case CANNOT_ALLOCATE_TENSORS:
return "Cannot allocate tensors";
case NOT_INITIALIZED:
return "Interpreter has not been initialized";
case INVOKE_ERROR:
return "Interpreter invoke() returned an error";
default:
return "Unknown error";
}
}

protected:
bool failed;
TfLiteError error;
uint8_t tensorArena[tensorArenaSize];
tflite::ErrorReporter *reporter;
tflite::MicroInterpreter *interpreter;
TfLiteTensor *input;
TfLiteTensor *output;
const tflite::Model *model;
};
}
}
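For multi-class models, the interface above pairs `predict()` with `predictClass()` and `probaToClass()`. A usage sketch, assuming the refactored 2.4.0 class keeps these methods (the classifier model and sizes here are hypothetical):

```
#define NUMBER_OF_INPUTS 4
#define NUMBER_OF_OUTPUTS 3
#define TENSOR_ARENA_SIZE 8*1024

Eloquent::TinyML::TfLite<NUMBER_OF_INPUTS, NUMBER_OF_OUTPUTS, TENSOR_ARENA_SIZE> clf;

void classify(float *features) {
    // runs inference, then returns the index of the highest output probability
    uint8_t label = clf.predictClass(features);

    Serial.print("Predicted class: ");
    Serial.println(label);
}
```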
28 changes: 28 additions & 0 deletions src/TfLiteARM.h
@@ -0,0 +1,28 @@
//
// Created by Simone on 28/10/2021.
//

#ifndef ELOQUENTTINYML_TFLITEARM_H
#define ELOQUENTTINYML_TFLITEARM_H

#include "tensorflow_arm/tensorflow/lite/version.h"
#include "tensorflow_arm/tensorflow/lite/schema/schema_generated.h"
#include "tensorflow_arm/tensorflow/lite/micro/all_ops_resolver.h"
#include "tensorflow_arm/tensorflow/lite/micro/micro_error_reporter.h"
#include "tensorflow_arm/tensorflow/lite/micro/micro_interpreter.h"
#include "TfLiteAbstract.h"


namespace Eloquent {
namespace TinyML {

/**
* Run TensorFlow Lite models on ARM
*/
template<size_t inputSize, size_t outputSize, size_t tensorArenaSize>
class TfLite : public TfLiteAbstract<tflite::AllOpsResolver, inputSize, outputSize, tensorArenaSize> {
};
}
}

#endif //ELOQUENTTINYML_TFLITEARM_H