EyeAI
Loading...
Searching...
No Matches
TfLiteUtils.hpp File Reference
#include "EyeAICore/TensorBuffer.hpp"
#include "EyeAICore/utils/Errors.hpp"
#include <memory>
#include <optional>
#include <span>
#include <string_view>
#include <tensorflow/lite/c/c_api.h>
#include <tensorflow/lite/delegates/gpu/delegate.h>
Include dependency graph for TfLiteUtils.hpp:
This graph shows which files directly or indirectly include this file:

Go to the source code of this file.

Classes

class  TensorType
 either an input or an output tensor More...
 
struct  TfLiteNonFloatTensorTypeError
 
struct  TfLiteTensorsNotCreatedError
 
struct  TfLiteTensorElementCountMismatch
 
struct  TfLiteCopyFromInputTensorError
 
struct  InvalidFloat32QuantizationTypeError
 
struct  QuantizationElementsMismatch
 
struct  AsymmetricQuantizationError
 
struct  InvalidQuantizedType
 
struct  TfLiteCopyToOutputTensorError
 
struct  TfLiteCreateInterpreterError
 
struct  TfLiteAllocateTensorsError
 
struct  TfLiteInvokeInterpreterError
 
struct  InvalidInputFormatForModel
 
struct  InvalidOutputFormatForModel
 

Enumerations

enum class  NpuConfiguration { MiDaS , rel2abs , Yolo }
 

Functions

std::string_view format_tflite_type (TfLiteType type)
 
std::optional< size_t > get_tflite_type_size (TfLiteType type)
 
std::string_view format_tflite_status (TfLiteStatus status)
 
std::optional< TfLiteAffineQuantization > get_tensor_quantization (const TfLiteTensor *tensor)
 
std::span< const int > get_tensor_shape (const TfLiteTensor *tensor)
 
std::unique_ptr< TfLiteDelegate, decltype(&TfLiteGpuDelegateV2Delete)> create_gpu_delegate (std::string_view gpu_delegate_serialization_dir, std::string_view model_token, ProfilingFrame &profiling_frame)
 
std::unique_ptr< TfLiteDelegate, void(*)(TfLiteDelegate *)> create_qnn_npu_delegate (std::string_view delegate_serialization_dir, std::string_view model_token, NpuConfiguration config, std::string_view skel_library_dir)
 
 COMBINED_ERROR (TfLiteLoadNonQuantizedInputError, TfLiteTensorsNotCreatedError, TfLiteNonFloatTensorTypeError, TfLiteTensorElementCountMismatch, TfLiteCopyFromInputTensorError)
 
 COMBINED_ERROR (QuantizeFloatError, InvalidFloat32QuantizationTypeError, QuantizationElementsMismatch, AsymmetricQuantizationError)
 
 COMBINED_ERROR (TfLiteLoadQuantizedInputError, TfLiteTensorsNotCreatedError, TfLiteTensorElementCountMismatch, InvalidQuantizedType, QuantizeFloatError)
 
 COMBINED_ERROR (TfLiteLoadInputError, TfLiteLoadNonQuantizedInputError, TfLiteLoadQuantizedInputError)
 
std::optional< TfLiteLoadInputError > load_input_tensor_with_floats (TfLiteTensor *input_tensor, std::span< const float > values, ProfilingFrame &profiling_frame)
 loads input tensor with floats array, supports quantization
 
 COMBINED_ERROR (TfLiteReadNonQuantizedOutputError, TfLiteNonFloatTensorTypeError, TfLiteTensorElementCountMismatch, TfLiteCopyToOutputTensorError)
 
 COMBINED_ERROR (DequantizeFloatError, InvalidFloat32QuantizationTypeError, QuantizationElementsMismatch, AsymmetricQuantizationError)
 
 COMBINED_ERROR (TfLiteReadQuantizedOutputError, TfLiteTensorsNotCreatedError, TfLiteTensorElementCountMismatch, DequantizeFloatError)
 
 COMBINED_ERROR (TfLiteReadOutputError, TfLiteReadNonQuantizedOutputError, TfLiteReadQuantizedOutputError)
 
std::optional< TfLiteReadOutputError > read_floats_from_output_tensor (const TfLiteTensor *output_tensor, std::span< float > output, ProfilingFrame &profiling_frame)
 reads floats array from output tensor, supports quantization
 
 COMBINED_ERROR (TfLiteCreateRuntimeError, TfLiteCreateInterpreterError, TfLiteAllocateTensorsError)
 
 COMBINED_ERROR (TfLiteRunInferenceError, TfLiteLoadInputError, TfLiteInvokeInterpreterError, TfLiteReadOutputError, InvalidInputFormatForModel, InvalidOutputFormatForModel)
 

Enumeration Type Documentation

◆ NpuConfiguration

enum class NpuConfiguration
strong
Enumerator
MiDaS 
rel2abs 
Yolo 

Function Documentation

◆ COMBINED_ERROR() [1/10]

◆ COMBINED_ERROR() [2/10]

◆ COMBINED_ERROR() [3/10]

COMBINED_ERROR ( TfLiteCreateRuntimeError ,
TfLiteCreateInterpreterError ,
TfLiteAllocateTensorsError  )

◆ COMBINED_ERROR() [4/10]

COMBINED_ERROR ( TfLiteLoadInputError ,
TfLiteLoadNonQuantizedInputError ,
TfLiteLoadQuantizedInputError  )

◆ COMBINED_ERROR() [5/10]

◆ COMBINED_ERROR() [6/10]

COMBINED_ERROR ( TfLiteLoadQuantizedInputError ,
TfLiteTensorsNotCreatedError ,
TfLiteTensorElementCountMismatch ,
InvalidQuantizedType ,
QuantizeFloatError  )

◆ COMBINED_ERROR() [7/10]

COMBINED_ERROR ( TfLiteReadNonQuantizedOutputError ,
TfLiteNonFloatTensorTypeError ,
TfLiteTensorElementCountMismatch ,
TfLiteCopyToOutputTensorError  )

◆ COMBINED_ERROR() [8/10]

COMBINED_ERROR ( TfLiteReadOutputError ,
TfLiteReadNonQuantizedOutputError ,
TfLiteReadQuantizedOutputError  )

◆ COMBINED_ERROR() [9/10]

COMBINED_ERROR ( TfLiteReadQuantizedOutputError ,
TfLiteTensorsNotCreatedError ,
TfLiteTensorElementCountMismatch ,
DequantizeFloatError  )

◆ COMBINED_ERROR() [10/10]

COMBINED_ERROR ( TfLiteRunInferenceError ,
TfLiteLoadInputError ,
TfLiteInvokeInterpreterError ,
TfLiteReadOutputError ,
InvalidInputFormatForModel ,
InvalidOutputFormatForModel  )

◆ create_gpu_delegate()

std::unique_ptr< TfLiteDelegate, decltype(&TfLiteGpuDelegateV2Delete)> create_gpu_delegate ( std::string_view gpu_delegate_serialization_dir,
std::string_view model_token,
ProfilingFrame & profiling_frame )
nodiscard
Parameters
gpu_delegate_serialization_dir — Directory where TfLite saves compiled GPU delegate kernels
model_token — unique token to identify the model; should change on model update
profiling_frame — profiling frame used for profiling

◆ create_qnn_npu_delegate()

std::unique_ptr< TfLiteDelegate, void(*)(TfLiteDelegate *)> create_qnn_npu_delegate ( std::string_view delegate_serialization_dir,
std::string_view model_token,
NpuConfiguration config,
std::string_view skel_library_dir )
nodiscard
Returns
nullptr if the platform does not currently support the QNN delegate

◆ format_tflite_status()

std::string_view format_tflite_status ( TfLiteStatus status)

◆ format_tflite_type()

std::string_view format_tflite_type ( TfLiteType type)

◆ get_tensor_quantization()

std::optional< TfLiteAffineQuantization > get_tensor_quantization ( const TfLiteTensor * tensor)
nodiscard
Returns
internal quantization parameters of tensor, or nullopt if tensor is not quantized

◆ get_tensor_shape()

std::span< const int > get_tensor_shape ( const TfLiteTensor * tensor)
nodiscard

◆ get_tflite_type_size()

std::optional< size_t > get_tflite_type_size ( TfLiteType type)
Returns
byte size of type, or nullopt if type has a dynamic size

◆ load_input_tensor_with_floats()

std::optional< TfLiteLoadInputError > load_input_tensor_with_floats ( TfLiteTensor * input_tensor,
std::span< const float > values,
ProfilingFrame & profiling_frame )
nodiscard

loads input tensor with floats array, supports quantization

◆ read_floats_from_output_tensor()

std::optional< TfLiteReadOutputError > read_floats_from_output_tensor ( const TfLiteTensor * output_tensor,
std::span< float > output,
ProfilingFrame & profiling_frame )
nodiscard

reads floats array from output tensor, supports quantization