/* Generated by Edge Impulse
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
// Generated on: 17.11.2022 19:37:29
#include <stdio.h>
#include <stdlib.h>
#include <string.h> // memset
#include "edge-impulse-sdk/tensorflow/lite/c/builtin_op_data.h"
#include "edge-impulse-sdk/tensorflow/lite/c/common.h"
#include "edge-impulse-sdk/tensorflow/lite/micro/micro_mutable_op_resolver.h"
#include "edge-impulse-sdk/porting/ei_classifier_porting.h"
#if EI_CLASSIFIER_PRINT_STATE
#if defined(__cplusplus) && EI_C_LINKAGE == 1
extern "C" {
extern void ei_printf(const char *format, ...);
}
#else
extern void ei_printf(const char *format, ...);
#endif
#endif
#if defined __GNUC__
#define ALIGN(X) __attribute__((aligned(X)))
#elif defined _MSC_VER
#define ALIGN(X) __declspec(align(X))
#elif defined __TASKING__
#define ALIGN(X) __align(X)
#endif
#ifndef EI_MAX_SCRATCH_BUFFER_COUNT
#define EI_MAX_SCRATCH_BUFFER_COUNT 4
#endif // EI_MAX_SCRATCH_BUFFER_COUNT
#ifndef EI_MAX_OVERFLOW_BUFFER_COUNT
#define EI_MAX_OVERFLOW_BUFFER_COUNT 10
#endif // EI_MAX_OVERFLOW_BUFFER_COUNT
using namespace tflite;
using namespace tflite::ops;
using namespace tflite::ops::micro;
namespace {
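// Size in bytes of the arena that holds the model's intermediate (kTfLiteArenaRw)
// tensors; persistent and scratch buffers requested by kernels are carved from the
// unused top end of this same arena.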
constexpr int kTensorArenaSize = 272;
#if defined(EI_CLASSIFIER_ALLOCATION_STATIC)
uint8_t tensor_arena[kTensorArenaSize] ALIGN(16);
#elif defined(EI_CLASSIFIER_ALLOCATION_STATIC_HIMAX)
#pragma Bss(".tensor_arena")
uint8_t tensor_arena[kTensorArenaSize] ALIGN(16);
#pragma Bss()
#elif defined(EI_CLASSIFIER_ALLOCATION_STATIC_HIMAX_GNU)
uint8_t tensor_arena[kTensorArenaSize] ALIGN(16) __attribute__((section(".tensor_arena")));
#else
#define EI_CLASSIFIER_ALLOCATION_HEAP 1
uint8_t* tensor_arena = NULL;
#endif
static uint8_t* tensor_boundary;
static uint8_t* current_location;
template <int SZ, class T> struct TfArray {
  int sz; T elem[SZ];
};
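// Operator kinds used by this model; values index into the registrations[] table below.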
enum used_operators_e {
  OP_FULLY_CONNECTED, OP_SOFTMAX, OP_LAST
};
struct TensorInfo_t { // subset of TfLiteTensor used for initialization from constant memory
  TfLiteAllocationType allocation_type;
  TfLiteType type;
  void* data;
  TfLiteIntArray* dims;
  size_t bytes;
  TfLiteQuantization quantization;
};
struct NodeInfo_t { // subset of TfLiteNode used for initialization from constant memory
  struct TfLiteIntArray* inputs;
  struct TfLiteIntArray* outputs;
  void* builtin_data;
  used_operators_e used_op_index;
};
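// Static interpreter state: the TfLiteContext handed to kernels, one TfLiteTensor and
// TfLiteEvalTensor per model tensor (11 in total), the kernel registrations, and one
// TfLiteNode per operator invocation (4 in total).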
TfLiteContext ctx{};
TfLiteTensor tflTensors[11];
TfLiteEvalTensor tflEvalTensors[11];
TfLiteRegistration registrations[OP_LAST];
TfLiteNode tflNodes[4];
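// Constant data for the graph: tensor shapes, per-tensor affine quantization
// parameters (scale / zero point), and the int8 weights and int32 biases.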
const TfArray<2, int> tensor_dimension0 = { 2, { 1,33 } };
const TfArray<1, float> quant0_scale = { 1, { 0.11748567223548889, } };
const TfArray<1, int> quant0_zero = { 1, { -128 } };
const TfLiteAffineQuantization quant0 = { (TfLiteFloatArray*)&quant0_scale, (TfLiteIntArray*)&quant0_zero, 0 };
const ALIGN(16) int8_t tensor_data1[20*33] = {
-32, 7, -7, 9, 30, 13, -18, -60, -79, -7, 75, 48, 20, 65, 29, 33, 26, -10, -13, 6, 127, -17, 79, -12, -13, 5, -20, 31, -17, -28, -68, -71, 41,
8, 1, 0, -32, 7, 41, 24, 32, -32, 27, 26, -30, -1, -33, -7, -18, -15, -40, 25, 11, -8, -31, -31, -28, 5, -25, 10, 17, -22, -8, 38, 72, -37,
67, -39, 10, 37, 2, -2, -56, 14, -41, 34, 15, 54, -18, -8, 21, -43, -4, -6, 102, 32, 80, 96, -79, 10, -26, 30, -3, -3, 4, -72, -80, -1, -95,
-111, -17, -42, 25, -4, 8, -20, 6, -41, -36, -58, -79, 8, 24, -1, -46, -3, -26, 30, 23, -55, -48, 70, 17, 6, 22, 1, -24, 26, 50, 37, 73, 28,
28, 27, 2, 33, 4, 31, -31, 18, 27, 27, 4, 11, 12, 32, -14, 29, -5, -5, 27, 12, -17, -45, 2, 19, 2, -2, 42, 26, 46, 38, 41, 1, -13,
77, -10, 7, -6, 27, 25, 11, 24, 24, 16, 57, -117, 31, -46, 26, 22, -8, 32, -1, -13, -123, -91, 25, -17, -13, 16, 31, -7, 43, 52, 55, 86, 48,
-73, 29, -11, 6, -22, 13, 23, 16, -44, -34, -81, -67, -31, -1, 32, 3, 4, 18, 33, -4, -37, -22, 63, 4, 28, 7, -10, 3, 17, 47, 71, 57, 0,
-5, 40, -21, 16, -21, 48, 26, -45, 29, -25, 42, -13, -23, -13, -19, 5, 2, -9, 12, -14, -26, -30, -36, -13, -7, -22, 31, 23, 13, 18, -40, -3, -24,
9, 21, 9, -38, 21, 15, -16, -12, 9, -38, -44, 59, 8, -7, -37, -24, 8, -10, 71, 4, 10, 9, -33, -17, -24, 31, -40, 9, 22, -24, -38, -18, -48,
63, 28, 20, 19, -6, 7, 0, -23, 11, 21, 68, -22, 5, 0, 17, 3, 10, 52, 67, -76, -49, -48, 7, 24, 25, -6, -5, 0, 48, -1, -63, -15, 23,
6, -34, 28, -12, 51, 2, 33, 33, 5, 27, -6, 40, 10, 79, 9, -5, -1, 37, 48, -2, -92, -45, 18, -4, -34, 29, -39, 26, 15, -7, 106, 39, -41,
25, 17, -5, -11, -7, 30, 8, 27, 26, 30, -32, 51, 7, -12, -15, 14, 3, 13, 45, 35, 34, -17, -2, 15, 30, -34, -12, 22, -36, 67, -13, 48, -38,
31, 27, 20, -14, 26, -6, 12, 0, 7, 48, -3, 2, -30, 47, 38, -22, 31, 4, -13, 7, -29, 4, 16, 3, -25, 13, -23, 17, 38, -20, 52, 1, -34,
5, -6, 9, 16, -1, -3, 18, 21, 48, 53, 9, 52, 8, -18, -9, 10, 0, -24, -29, 73, 22, -21, 19, -20, -37, 21, 17, -9, 28, -19, -30, 49, 8,
-8, -30, 53, 1, 24, -2, 24, 43, 19, 0, -21, 0, -17, -3, 17, 4, 4, -9, -7, -16, 18, 21, -33, 11, 22, 15, -29, 3, -13, -11, -10, -21, 9,
-29, 55, 11, 26, 9, 35, -11, -26, -41, -3, -49, 99, 16, 62, 48, -4, 8, 35, 11, 81, 84, -42, 50, 26, 18, -11, 30, 4, 7, 52, 13, 22, -4,
6, 41, 24, 11, 5, 10, 4, -35, -1, 29, 47, -102, -10, -34, -11, 40, 25, 33, -30, -52, -113, -77, 21, 28, 31, 11, 7, 12, 57, 98, 48, 56, 47,
-59, -4, -32, 3, 38, -3, 27, 58, -29, -44, -46, -1, -1, -7, -1, -8, 7, -14, 26, 57, -64, 26, 32, -7, 42, -10, -31, 24, 22, 75, 81, 77, 17,
25, 8, 51, 23, 48, 26, 4, -42, -6, 3, 22, 6, 27, 23, -1, 64, -14, 16, -65, -4, 19, -53, 26, 38, 9, -32, 49, -6, 27, 36, 55, 58, 86,
-27, 9, 15, 10, 3, -3, -4, -44, 30, 31, -16, 27, -3, -47, 21, -14, -1, -51, 11, 1, 9, 57, 53, -48, 22, 25, -2, 29, -43, -42, -43, -37, 55,
};
const TfArray<2, int> tensor_dimension1 = { 2, { 20,33 } };
const TfArray<1, float> quant1_scale = { 1, { 0.0081179020926356316, } };
const TfArray<1, int> quant1_zero = { 1, { 0 } };
const TfLiteAffineQuantization quant1 = { (TfLiteFloatArray*)&quant1_scale, (TfLiteIntArray*)&quant1_zero, 0 };
const ALIGN(16) int32_t tensor_data2[20] = { 615, 6, 594, 8, -326, 210, -329, 180, 145, 399, -590, -150, -341, -174, 1, -153, 67, -345, 4, 526, };
const TfArray<1, int> tensor_dimension2 = { 1, { 20 } };
const TfArray<1, float> quant2_scale = { 1, { 0.00095373718068003654, } };
const TfArray<1, int> quant2_zero = { 1, { 0 } };
const TfLiteAffineQuantization quant2 = { (TfLiteFloatArray*)&quant2_scale, (TfLiteIntArray*)&quant2_zero, 0 };
const ALIGN(16) int8_t tensor_data3[10*20] = {
-45, 23, 11, 6, 60, -20, -17, 17, -11, -37, 19, 11, 57, 55, 41, -6, -52, 8, -24, 4,
-41, 4, -26, 9, 6, -5, 25, -23, -20, -43, -1, -43, -20, -17, -48, -50, -33, 23, 18, -53,
-43, 50, 99, -83, 71, 60, -33, 2, 104, 91, -28, 18, 44, -4, 50, -19, -15, -3, -31, 1,
-39, -6, -26, -53, 16, 17, 37, 33, 25, -18, 127, 19, 42, -29, -28, 83, 30, 34, 56, -63,
23, 9, -59, -59, -54, 90, 3, 6, -59, 64, -38, -31, -21, 17, -37, 17, 36, -59, 64, 47,
79, 31, -22, 48, -45, -42, 23, -28, 49, -62, -36, 43, -54, 20, -35, 80, -69, 22, -30, 40,
69, -30, -35, -16, 28, -43, -32, 10, 3, 47, -7, 25, -35, -11, -19, 40, 4, -49, 37, -27,
-5, 41, 64, -55, -6, -21, -36, -47, 51, 44, 45, 40, -15, -13, -20, 12, -35, -2, -24, 15,
-62, 40, -28, 15, 54, -2, 24, -50, -17, -32, 52, 62, 63, 77, 27, 26, -33, 68, 14, -16,
-19, 29, 22, 98, -46, 48, 86, -28, 46, -29, 3, 11, -53, -33, -53, -1, 89, 97, -12, 37,
};
const TfArray<2, int> tensor_dimension3 = { 2, { 10,20 } };
const TfArray<1, float> quant3_scale = { 1, { 0.0082857050001621246, } };
const TfArray<1, int> quant3_zero = { 1, { 0 } };
const TfLiteAffineQuantization quant3 = { (TfLiteFloatArray*)&quant3_scale, (TfLiteIntArray*)&quant3_zero, 0 };
const ALIGN(16) int32_t tensor_data4[10] = { -99, -114, 692, -913, 874, 404, -305, 121, -611, 77, };
const TfArray<1, int> tensor_dimension4 = { 1, { 10 } };
const TfArray<1, float> quant4_scale = { 1, { 0.00065062998328357935, } };
const TfArray<1, int> quant4_zero = { 1, { 0 } };
const TfLiteAffineQuantization quant4 = { (TfLiteFloatArray*)&quant4_scale, (TfLiteIntArray*)&quant4_zero, 0 };
const ALIGN(16) int8_t tensor_data5[6*10] = {
-24, -73, 57, 39, -104, -51, 43, 91, -48, -71,
30, 53, 20, 10, 95, -102, 47, -115, -77, -31,
-113, 3, 94, -105, 16, 79, -115, 52, -127, 120,
-66, 57, -97, 17, 10, 108, 89, -46, 0, -122,
-19, -41, -57, 57, -32, -4, 10, 10, 64, 60,
37, 33, 50, -14, -75, -70, -60, -37, 90, -22,
};
const TfArray<2, int> tensor_dimension5 = { 2, { 6,10 } };
const TfArray<1, float> quant5_scale = { 1, { 0.0081817712634801865, } };
const TfArray<1, int> quant5_zero = { 1, { 0 } };
const TfLiteAffineQuantization quant5 = { (TfLiteFloatArray*)&quant5_scale, (TfLiteIntArray*)&quant5_zero, 0 };
const ALIGN(16) int32_t tensor_data6[6] = { -120, 76, 391, 274, -312, -119, };
const TfArray<1, int> tensor_dimension6 = { 1, { 6 } };
const TfArray<1, float> quant6_scale = { 1, { 0.0012734470656141639, } };
const TfArray<1, int> quant6_zero = { 1, { 0 } };
const TfLiteAffineQuantization quant6 = { (TfLiteFloatArray*)&quant6_scale, (TfLiteIntArray*)&quant6_zero, 0 };
const TfArray<2, int> tensor_dimension7 = { 2, { 1,20 } };
const TfArray<1, float> quant7_scale = { 1, { 0.078524395823478699, } };
const TfArray<1, int> quant7_zero = { 1, { -128 } };
const TfLiteAffineQuantization quant7 = { (TfLiteFloatArray*)&quant7_scale, (TfLiteIntArray*)&quant7_zero, 0 };
const TfArray<2, int> tensor_dimension8 = { 2, { 1,10 } };
const TfArray<1, float> quant8_scale = { 1, { 0.15564441680908203, } };
const TfArray<1, int> quant8_zero = { 1, { -128 } };
const TfLiteAffineQuantization quant8 = { (TfLiteFloatArray*)&quant8_scale, (TfLiteIntArray*)&quant8_zero, 0 };
const TfArray<2, int> tensor_dimension9 = { 2, { 1,6 } };
const TfArray<1, float> quant9_scale = { 1, { 0.31460750102996826, } };
const TfArray<1, int> quant9_zero = { 1, { 8 } };
const TfLiteAffineQuantization quant9 = { (TfLiteFloatArray*)&quant9_scale, (TfLiteIntArray*)&quant9_zero, 0 };
const TfArray<2, int> tensor_dimension10 = { 2, { 1,6 } };
const TfArray<1, float> quant10_scale = { 1, { 0.00390625, } };
const TfArray<1, int> quant10_zero = { 1, { -128 } };
const TfLiteAffineQuantization quant10 = { (TfLiteFloatArray*)&quant10_scale, (TfLiteIntArray*)&quant10_zero, 0 };
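// Operator parameters and graph wiring. The model is a small fully connected
// classifier: 33 input features -> Dense(20, ReLU) -> Dense(10, ReLU) -> Dense(6)
// -> Softmax over 6 classes. Tensor indices in inputsN/outputsN refer to the
// tensorData[] table below.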
const TfLiteFullyConnectedParams opdata0 = { kTfLiteActRelu, kTfLiteFullyConnectedWeightsFormatDefault, false, false };
const TfArray<3, int> inputs0 = { 3, { 0,1,2 } };
const TfArray<1, int> outputs0 = { 1, { 7 } };
const TfLiteFullyConnectedParams opdata1 = { kTfLiteActRelu, kTfLiteFullyConnectedWeightsFormatDefault, false, false };
const TfArray<3, int> inputs1 = { 3, { 7,3,4 } };
const TfArray<1, int> outputs1 = { 1, { 8 } };
const TfLiteFullyConnectedParams opdata2 = { kTfLiteActNone, kTfLiteFullyConnectedWeightsFormatDefault, false, false };
const TfArray<3, int> inputs2 = { 3, { 8,5,6 } };
const TfArray<1, int> outputs2 = { 1, { 9 } };
const TfLiteSoftmaxParams opdata3 = { 1 };
const TfArray<1, int> inputs3 = { 1, { 9 } };
const TfArray<1, int> outputs3 = { 1, { 10 } };
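// Per-tensor metadata. Arena (kTfLiteArenaRw) tensors reference a location inside the
// tensor arena: an offset that trained_model_init adds to the heap-allocated arena
// base, or a direct address when the arena is statically allocated. kTfLiteMmapRo
// tensors point at the constant weight/bias data above.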
const TensorInfo_t tensorData[] = {
{ kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 0, (TfLiteIntArray*)&tensor_dimension0, 33, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant0))}, },
{ kTfLiteMmapRo, kTfLiteInt8, (void*)tensor_data1, (TfLiteIntArray*)&tensor_dimension1, 660, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant1))}, },
{ kTfLiteMmapRo, kTfLiteInt32, (void*)tensor_data2, (TfLiteIntArray*)&tensor_dimension2, 80, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant2))}, },
{ kTfLiteMmapRo, kTfLiteInt8, (void*)tensor_data3, (TfLiteIntArray*)&tensor_dimension3, 200, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant3))}, },
{ kTfLiteMmapRo, kTfLiteInt32, (void*)tensor_data4, (TfLiteIntArray*)&tensor_dimension4, 40, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant4))}, },
{ kTfLiteMmapRo, kTfLiteInt8, (void*)tensor_data5, (TfLiteIntArray*)&tensor_dimension5, 60, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant5))}, },
{ kTfLiteMmapRo, kTfLiteInt32, (void*)tensor_data6, (TfLiteIntArray*)&tensor_dimension6, 24, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant6))}, },
{ kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 48, (TfLiteIntArray*)&tensor_dimension7, 20, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant7))}, },
{ kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 0, (TfLiteIntArray*)&tensor_dimension8, 10, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant8))}, },
{ kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 16, (TfLiteIntArray*)&tensor_dimension9, 6, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant9))}, },
{ kTfLiteArenaRw, kTfLiteInt8, tensor_arena + 0, (TfLiteIntArray*)&tensor_dimension10, 6, {kTfLiteAffineQuantization, const_cast<void*>(static_cast<const void*>(&quant10))}, },
};
const NodeInfo_t nodeData[] = {
{ (TfLiteIntArray*)&inputs0, (TfLiteIntArray*)&outputs0, const_cast<void*>(static_cast<const void*>(&opdata0)), OP_FULLY_CONNECTED, },
{ (TfLiteIntArray*)&inputs1, (TfLiteIntArray*)&outputs1, const_cast<void*>(static_cast<const void*>(&opdata1)), OP_FULLY_CONNECTED, },
{ (TfLiteIntArray*)&inputs2, (TfLiteIntArray*)&outputs2, const_cast<void*>(static_cast<const void*>(&opdata2)), OP_FULLY_CONNECTED, },
{ (TfLiteIntArray*)&inputs3, (TfLiteIntArray*)&outputs3, const_cast<void*>(static_cast<const void*>(&opdata3)), OP_SOFTMAX, },
};
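// Persistent buffers are allocated downwards from the end of the tensor arena. If a
// request no longer fits (typically CMSIS-NN kernel buffers whose size is not known
// up front), it spills over into heap allocations tracked in overflow_buffers so that
// trained_model_reset can free them.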
static void* overflow_buffers[EI_MAX_OVERFLOW_BUFFER_COUNT];
static size_t overflow_buffers_ix = 0;
static void * AllocatePersistentBuffer(struct TfLiteContext* ctx,
                                       size_t bytes) {
  void *ptr;
  if (current_location - bytes < tensor_boundary) {
    if (overflow_buffers_ix > EI_MAX_OVERFLOW_BUFFER_COUNT - 1) {
      ei_printf("ERR: Failed to allocate persistent buffer of size %d, does not fit in tensor arena and reached EI_MAX_OVERFLOW_BUFFER_COUNT\n",
                (int)bytes);
      return NULL;
    }
    // OK, this will look super weird, but.... we have CMSIS-NN buffers which
    // we cannot calculate beforehand easily.
    ptr = ei_calloc(bytes, 1);
    if (ptr == NULL) {
      ei_printf("ERR: Failed to allocate persistent buffer of size %d\n", (int)bytes);
      return NULL;
    }
    overflow_buffers[overflow_buffers_ix++] = ptr;
    return ptr;
  }
  current_location -= bytes;
  ptr = current_location;
  memset(ptr, 0, bytes);
  return ptr;
}
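// Scratch buffers requested by kernels (normally during their prepare step); they are
// carved out with the persistent-buffer allocator above and handed back by index.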
typedef struct {
  size_t bytes;
  void *ptr;
} scratch_buffer_t;
static scratch_buffer_t scratch_buffers[EI_MAX_SCRATCH_BUFFER_COUNT];
static size_t scratch_buffers_ix = 0;
static TfLiteStatus RequestScratchBufferInArena(struct TfLiteContext* ctx, size_t bytes,
                                                int* buffer_idx) {
  if (scratch_buffers_ix > EI_MAX_SCRATCH_BUFFER_COUNT - 1) {
    ei_printf("ERR: Failed to allocate scratch buffer of size %d, reached EI_MAX_SCRATCH_BUFFER_COUNT\n",
              (int)bytes);
    return kTfLiteError;
  }
  scratch_buffer_t b;
  b.bytes = bytes;
  b.ptr = AllocatePersistentBuffer(ctx, b.bytes);
  if (!b.ptr) {
    ei_printf("ERR: Failed to allocate scratch buffer of size %d\n",
              (int)bytes);
    return kTfLiteError;
  }
  scratch_buffers[scratch_buffers_ix] = b;
  *buffer_idx = scratch_buffers_ix;
  scratch_buffers_ix++;
  return kTfLiteOk;
}
static void* GetScratchBuffer(struct TfLiteContext* ctx, int buffer_idx) {
  // valid indices are 0 .. scratch_buffers_ix - 1
  if (buffer_idx >= (int)scratch_buffers_ix) {
    return NULL;
  }
  return scratch_buffers[buffer_idx].ptr;
}
static TfLiteTensor* GetTensor(const struct TfLiteContext* context,
                               int tensor_idx) {
  return &tflTensors[tensor_idx];
}
static TfLiteEvalTensor* GetEvalTensor(const struct TfLiteContext* context,
                                       int tensor_idx) {
  return &tflEvalTensors[tensor_idx];
}
} // namespace
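// Allocates (or zeroes) the tensor arena, wires up the TfLiteContext callbacks,
// materializes the tensors and nodes from the constant tables above, registers the
// kernels, and runs each operator's init and prepare hooks.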
TfLiteStatus trained_model_init( void*(*alloc_fnc)(size_t,size_t) ) {
#ifdef EI_CLASSIFIER_ALLOCATION_HEAP
  tensor_arena = (uint8_t*) alloc_fnc(16, kTensorArenaSize);
  if (!tensor_arena) {
    ei_printf("ERR: failed to allocate tensor arena\n");
    return kTfLiteError;
  }
#else
  memset(tensor_arena, 0, kTensorArenaSize);
#endif
  tensor_boundary = tensor_arena;
  current_location = tensor_arena + kTensorArenaSize;
  ctx.AllocatePersistentBuffer = &AllocatePersistentBuffer;
  ctx.RequestScratchBufferInArena = &RequestScratchBufferInArena;
  ctx.GetScratchBuffer = &GetScratchBuffer;
  ctx.GetTensor = &GetTensor;
  ctx.GetEvalTensor = &GetEvalTensor;
  ctx.tensors = tflTensors;
  ctx.tensors_size = 11;
  for (size_t i = 0; i < 11; ++i) {
    tflTensors[i].type = tensorData[i].type;
    tflEvalTensors[i].type = tensorData[i].type;
    tflTensors[i].is_variable = 0;
#if defined(EI_CLASSIFIER_ALLOCATION_HEAP)
    tflTensors[i].allocation_type = tensorData[i].allocation_type;
#else
    tflTensors[i].allocation_type = (tensor_arena <= tensorData[i].data && tensorData[i].data < tensor_arena + kTensorArenaSize) ? kTfLiteArenaRw : kTfLiteMmapRo;
#endif
    tflTensors[i].bytes = tensorData[i].bytes;
    tflTensors[i].dims = tensorData[i].dims;
    tflEvalTensors[i].dims = tensorData[i].dims;
#if defined(EI_CLASSIFIER_ALLOCATION_HEAP)
    if (tflTensors[i].allocation_type == kTfLiteArenaRw) {
      uint8_t* start = (uint8_t*) ((uintptr_t)tensorData[i].data + (uintptr_t) tensor_arena);
      tflTensors[i].data.data = start;
      tflEvalTensors[i].data.data = start;
    }
    else {
      tflTensors[i].data.data = tensorData[i].data;
      tflEvalTensors[i].data.data = tensorData[i].data;
    }
#else
    tflTensors[i].data.data = tensorData[i].data;
    tflEvalTensors[i].data.data = tensorData[i].data;
#endif // EI_CLASSIFIER_ALLOCATION_HEAP
    tflTensors[i].quantization = tensorData[i].quantization;
    if (tflTensors[i].quantization.type == kTfLiteAffineQuantization) {
      TfLiteAffineQuantization const* quant = ((TfLiteAffineQuantization const*)(tensorData[i].quantization.params));
      tflTensors[i].params.scale = quant->scale->data[0];
      tflTensors[i].params.zero_point = quant->zero_point->data[0];
    }
    if (tflTensors[i].allocation_type == kTfLiteArenaRw) {
      auto data_end_ptr = (uint8_t*)tflTensors[i].data.data + tensorData[i].bytes;
      if (data_end_ptr > tensor_boundary) {
        tensor_boundary = data_end_ptr;
      }
    }
  }
  if (tensor_boundary > current_location /* end of arena size */) {
    ei_printf("ERR: tensor arena is too small, does not fit model - even without scratch buffers\n");
    return kTfLiteError;
  }
  registrations[OP_FULLY_CONNECTED] = Register_FULLY_CONNECTED();
  registrations[OP_SOFTMAX] = Register_SOFTMAX();
  for (size_t i = 0; i < 4; ++i) {
    tflNodes[i].inputs = nodeData[i].inputs;
    tflNodes[i].outputs = nodeData[i].outputs;
    tflNodes[i].builtin_data = nodeData[i].builtin_data;
    tflNodes[i].custom_initial_data = nullptr;
    tflNodes[i].custom_initial_data_size = 0;
    if (registrations[nodeData[i].used_op_index].init) {
      tflNodes[i].user_data = registrations[nodeData[i].used_op_index].init(&ctx, (const char*)tflNodes[i].builtin_data, 0);
    }
  }
  for (size_t i = 0; i < 4; ++i) {
    if (registrations[nodeData[i].used_op_index].prepare) {
      TfLiteStatus status = registrations[nodeData[i].used_op_index].prepare(&ctx, &tflNodes[i]);
      if (status != kTfLiteOk) {
        return status;
      }
    }
  }
  return kTfLiteOk;
}
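// Accessors for the graph's input and output tensors (tensor 0 and tensor 10).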
static const int inTensorIndices[] = {
  0,
};
TfLiteTensor* trained_model_input(int index) {
  return &ctx.tensors[inTensorIndices[index]];
}
static const int outTensorIndices[] = {
  10,
};
TfLiteTensor* trained_model_output(int index) {
  return &ctx.tensors[outTensorIndices[index]];
}
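// Runs the four operators in order; with EI_CLASSIFIER_PRINT_STATE enabled it also
// dumps each node's input and output tensors after invocation.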
TfLiteStatus trained_model_invoke() {
  for (size_t i = 0; i < 4; ++i) {
    TfLiteStatus status = registrations[nodeData[i].used_op_index].invoke(&ctx, &tflNodes[i]);
#if EI_CLASSIFIER_PRINT_STATE
    ei_printf("layer %lu\n", i);
    ei_printf(" inputs:\n");
    for (size_t ix = 0; ix < tflNodes[i].inputs->size; ix++) {
      auto d = tensorData[tflNodes[i].inputs->data[ix]];
      size_t data_ptr = (size_t)d.data;
      if (d.allocation_type == kTfLiteArenaRw) {
        data_ptr = (size_t)tensor_arena + data_ptr;
      }
      if (d.type == TfLiteType::kTfLiteInt8) {
        int8_t* data = (int8_t*)data_ptr;
        ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, (int)d.allocation_type, (int)d.type);
        for (size_t jx = 0; jx < d.bytes; jx++) {
          ei_printf("%d ", data[jx]);
        }
      }
      else {
        float* data = (float*)data_ptr;
        ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, (int)d.allocation_type, (int)d.type);
        for (size_t jx = 0; jx < d.bytes / 4; jx++) {
          ei_printf("%f ", data[jx]);
        }
      }
      ei_printf("\n");
    }
    ei_printf("\n");
    ei_printf(" outputs:\n");
    for (size_t ix = 0; ix < tflNodes[i].outputs->size; ix++) {
      auto d = tensorData[tflNodes[i].outputs->data[ix]];
      size_t data_ptr = (size_t)d.data;
      if (d.allocation_type == kTfLiteArenaRw) {
        data_ptr = (size_t)tensor_arena + data_ptr;
      }
      if (d.type == TfLiteType::kTfLiteInt8) {
        int8_t* data = (int8_t*)data_ptr;
        ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, (int)d.allocation_type, (int)d.type);
        for (size_t jx = 0; jx < d.bytes; jx++) {
          ei_printf("%d ", data[jx]);
        }
      }
      else {
        float* data = (float*)data_ptr;
        ei_printf(" %lu (%zu bytes, ptr=%p, alloc_type=%d, type=%d): ", ix, d.bytes, data, (int)d.allocation_type, (int)d.type);
        for (size_t jx = 0; jx < d.bytes / 4; jx++) {
          ei_printf("%f ", data[jx]);
        }
      }
      ei_printf("\n");
    }
    ei_printf("\n");
#endif // EI_CLASSIFIER_PRINT_STATE
    if (status != kTfLiteOk) {
      return status;
    }
  }
  return kTfLiteOk;
}
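// Releases the arena (heap builds) and any overflow buffers, and resets the scratch
// buffer counter so the memory can be reused on the next init.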
TfLiteStatus trained_model_reset( void (*free_fnc)(void* ptr) ) {
#ifdef EI_CLASSIFIER_ALLOCATION_HEAP
  free_fnc(tensor_arena);
#endif
  // scratch buffers are allocated within the arena, so just reset the counter so memory can be reused
  scratch_buffers_ix = 0;
  // overflow buffers are on the heap, so free them first
  for (size_t ix = 0; ix < overflow_buffers_ix; ix++) {
    ei_free(overflow_buffers[ix]);
  }
  overflow_buffers_ix = 0;
  return kTfLiteOk;
}
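// Usage sketch (not part of the generated file). The Edge Impulse SDK's inference
// engine normally drives these entry points; calling them directly could look roughly
// like the following. The allocator/free callbacks here are assumptions: this file
// only shows that trained_model_init expects a function with the signature
// void* alloc(size_t alignment, size_t size) and trained_model_reset a matching
// void free(void* ptr).
//
//   TfLiteStatus status = trained_model_init(my_aligned_alloc /* hypothetical */);
//   if (status != kTfLiteOk) { /* handle error */ }
//
//   TfLiteTensor* input = trained_model_input(0);   // int8, shape {1, 33}
//   // quantize: q = round(x / input->params.scale) + input->params.zero_point,
//   // then write 33 values into input->data.int8
//
//   status = trained_model_invoke();
//
//   TfLiteTensor* output = trained_model_output(0); // int8, shape {1, 6}
//   // dequantize each class score:
//   // y = (output->data.int8[i] - output->params.zero_point) * output->params.scale
//
//   trained_model_reset(my_aligned_free /* hypothetical */);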