EyeAI
TfLiteUtils.cpp File Reference

Functions

std::optional< TfLiteAffineQuantization > get_tensor_quantization (const TfLiteTensor *tensor)
 
std::span< const int > get_tensor_shape (const TfLiteTensor *tensor)
 
std::unique_ptr< TfLiteDelegate, decltype(&TfLiteGpuDelegateV2Delete)> create_gpu_delegate (std::string_view gpu_delegate_serialization_dir, std::string_view model_token, ProfilingFrame &profiling_frame)
 
std::unique_ptr< TfLiteDelegate, void(*)(TfLiteDelegate *)> create_qnn_npu_delegate (std::string_view delegate_serialization_dir, std::string_view model_token, NpuConfiguration config, std::string_view skel_library_dir)
 
std::optional< TfLiteLoadInputError > load_input_tensor_with_floats (TfLiteTensor *input_tensor, std::span< const float > values, ProfilingFrame &profiling_frame)
 Loads the input tensor from an array of floats; quantized tensors are supported.
 
std::optional< TfLiteReadOutputError > read_floats_from_output_tensor (const TfLiteTensor *output_tensor, std::span< float > output, ProfilingFrame &profiling_frame)
 Reads an array of floats from the output tensor; quantized tensors are supported.
 
std::string_view format_tflite_type (TfLiteType type)
 
std::string_view format_tflite_status (TfLiteStatus status)
 
std::optional< size_t > get_tflite_type_size (TfLiteType type)
 

Function Documentation

◆ create_gpu_delegate()

std::unique_ptr< TfLiteDelegate, decltype(&TfLiteGpuDelegateV2Delete)> create_gpu_delegate (std::string_view gpu_delegate_serialization_dir, std::string_view model_token, ProfilingFrame &profiling_frame)
[[nodiscard]]
Parameters
    gpu_delegate_serialization_dir    Directory where TfLite saves compiled GPU delegate kernels
    model_token    Unique token identifying the model; it should change whenever the model is updated
    profiling_frame    Profiling frame used to record profiling data
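
A minimal usage sketch, not from the EyeAI sources: it creates the delegate and registers it with TfLite C-API interpreter options. The helper name, header name, cache path, and model token are all assumptions for illustration.

#include "TfLiteUtils.h"                   // assumed project header for these utilities
#include <tensorflow/lite/c/c_api.h>

// Hypothetical helper: creates the GPU delegate and registers it with the
// interpreter options. The returned unique_ptr owns the delegate and must
// outlive the interpreter that uses it.
auto attach_gpu_delegate(TfLiteInterpreterOptions *options,
                         ProfilingFrame &profiling_frame) {
    auto gpu_delegate = create_gpu_delegate(
        "/data/local/tmp/delegate_cache",  // hypothetical serialization dir
        "my_model_v3",                     // hypothetical model token
        profiling_frame);
    if (gpu_delegate) {
        // TfLite only borrows the raw pointer; ownership stays with the caller.
        TfLiteInterpreterOptionsAddDelegate(options, gpu_delegate.get());
    }
    return gpu_delegate;
}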

◆ create_qnn_npu_delegate()

std::unique_ptr< TfLiteDelegate, void(*)(TfLiteDelegate *)> create_qnn_npu_delegate (std::string_view delegate_serialization_dir, std::string_view model_token, NpuConfiguration config, std::string_view skel_library_dir)
[[nodiscard]]
Returns
    nullptr if the platform does not currently support the QNN delegate
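
A sketch of the nullptr check, not from the EyeAI sources: the paths and token are hypothetical, and NpuConfiguration is assumed to be default-constructible here.

#include "TfLiteUtils.h"                   // assumed project header
#include <tensorflow/lite/c/c_api.h>

// Hypothetical helper: tries the QNN NPU delegate first and lets the caller
// fall back to GPU/CPU when the returned pointer is null.
auto attach_npu_delegate(TfLiteInterpreterOptions *options) {
    auto npu_delegate = create_qnn_npu_delegate(
        "/data/local/tmp/delegate_cache",  // hypothetical serialization dir
        "my_model_v3",                     // hypothetical model token
        NpuConfiguration{},                // assumed default-constructible
        "/data/local/tmp/skel");           // hypothetical skel library dir
    if (npu_delegate != nullptr) {
        TfLiteInterpreterOptionsAddDelegate(options, npu_delegate.get());
    }
    return npu_delegate;  // nullptr signals that QNN is unsupported here
}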

◆ format_tflite_status()

std::string_view format_tflite_status ( TfLiteStatus status)

◆ format_tflite_type()

std::string_view format_tflite_type ( TfLiteType type)
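
A small sketch of how both formatters might appear in an error message; the helper name is hypothetical and the interpreter is assumed to be set up elsewhere.

#include "TfLiteUtils.h"  // assumed project header
#include <tensorflow/lite/c/c_api.h>
#include <iostream>

// Sketch: human-readable status and type names in a diagnostic message.
void report_invoke_failure(TfLiteInterpreter *interpreter) {
    TfLiteStatus status = TfLiteInterpreterInvoke(interpreter);
    if (status != kTfLiteOk) {
        const TfLiteTensor *out = TfLiteInterpreterGetOutputTensor(interpreter, 0);
        std::cerr << "invoke failed: " << format_tflite_status(status)
                  << " (output type: " << format_tflite_type(TfLiteTensorType(out))
                  << ")\n";
    }
}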

◆ get_tensor_quantization()

std::optional< TfLiteAffineQuantization > get_tensor_quantization (const TfLiteTensor *tensor)
[[nodiscard]]
Returns
    the tensor's internal quantization parameters, or nullopt if the tensor is not quantized
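
A sketch of inspecting the returned parameters; the helper name is hypothetical. Index 0 covers the per-tensor case; per-channel tensors carry one scale/zero_point per slice along quantized_dimension.

#include "TfLiteUtils.h"  // assumed project header
#include <cstdio>

// Sketch: print a tensor's affine quantization parameters, if any.
void print_quantization(const TfLiteTensor *tensor) {
    if (auto quant = get_tensor_quantization(tensor)) {
        // Affine mapping: real_value = scale * (quantized_value - zero_point)
        std::printf("scale=%f zero_point=%d\n",
                    quant->scale->data[0], quant->zero_point->data[0]);
    } else {
        std::printf("tensor is not quantized\n");
    }
}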

◆ get_tensor_shape()

std::span< const int > get_tensor_shape (const TfLiteTensor *tensor)
[[nodiscard]]

◆ get_tflite_type_size()

std::optional< size_t > get_tflite_type_size (TfLiteType type)
Returns
    byte size of the type, or nullopt if the type has a dynamic size
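
A sketch combining this function with get_tensor_shape() to compute a tensor's expected byte size; the helper name is hypothetical.

#include "TfLiteUtils.h"  // assumed project header
#include <tensorflow/lite/c/c_api.h>
#include <cstddef>
#include <optional>

// Sketch: expected byte size of a tensor from its shape and element type.
// Dynamically sized types (e.g. strings) yield nullopt.
std::optional<size_t> tensor_byte_size(const TfLiteTensor *tensor) {
    auto elem_size = get_tflite_type_size(TfLiteTensorType(tensor));
    if (!elem_size) {
        return std::nullopt;
    }
    size_t num_elements = 1;
    for (int dim : get_tensor_shape(tensor)) {
        num_elements *= static_cast<size_t>(dim);
    }
    return num_elements * *elem_size;
}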

◆ load_input_tensor_with_floats()

std::optional< TfLiteLoadInputError > load_input_tensor_with_floats (TfLiteTensor *input_tensor, std::span< const float > values, ProfilingFrame &profiling_frame)
[[nodiscard]]

Loads the input tensor from an array of floats; quantized tensors are supported.
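
A sketch of feeding features into the first input tensor before invoking the interpreter; the helper name is hypothetical and the interpreter and profiling frame are assumed to exist already.

#include "TfLiteUtils.h"  // assumed project header
#include <tensorflow/lite/c/c_api.h>
#include <span>

// Sketch: load float features into input tensor 0. Quantization of the
// input tensor, if any, is handled by the utility itself.
bool load_features(TfLiteInterpreter *interpreter,
                   std::span<const float> features,
                   ProfilingFrame &profiling_frame) {
    TfLiteTensor *input = TfLiteInterpreterGetInputTensor(interpreter, 0);
    auto error = load_input_tensor_with_floats(input, features, profiling_frame);
    return !error.has_value();  // nullopt signals success
}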

◆ read_floats_from_output_tensor()

std::optional< TfLiteReadOutputError > read_floats_from_output_tensor (const TfLiteTensor *output_tensor, std::span< float > output, ProfilingFrame &profiling_frame)
[[nodiscard]]

Reads an array of floats from the output tensor; quantized tensors are supported.
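
A sketch of reading the first output back as floats after a successful invoke; the helper name and the buffer size are hypothetical, and the buffer must match the tensor's element count.

#include "TfLiteUtils.h"  // assumed project header
#include <tensorflow/lite/c/c_api.h>
#include <vector>

// Sketch: copy output tensor 0 into a float buffer, dequantizing if needed.
std::vector<float> read_scores(const TfLiteInterpreter *interpreter,
                               ProfilingFrame &profiling_frame) {
    const TfLiteTensor *output = TfLiteInterpreterGetOutputTensor(interpreter, 0);
    std::vector<float> scores(1000);  // hypothetical element count
    auto error = read_floats_from_output_tensor(output, scores, profiling_frame);
    if (error.has_value()) {
        scores.clear();  // real code would inspect the TfLiteReadOutputError
    }
    return scores;
}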