EyeAI
Loading...
Searching...
No Matches
TfLiteRuntime Class Reference

Helper class that wraps the TFLite C API. More...

#include <TfLiteRuntime.hpp>

Public Types

using CreateResult
 

Public Member Functions

 ~TfLiteRuntime ()
 
std::optional< TfLiteRunInferenceError > run_inference (std::span< float > input, std::span< float > output)
 Run inference on the model; make sure input and output have the correct number of elements.
 
template<FloatTensorFormat InputFormat, FloatTensorFormat OutputFormat>
tl::expected< FloatTensorBuffer< OutputFormat >, TfLiteRunInferenceError > run_inference (FloatTensorBuffer< InputFormat > &input)
 
std::span< const int > get_input_shape () const
 
std::span< const int > get_output_shape () const
 
 TfLiteRuntime (TfLiteRuntime &&)=delete
 
 TfLiteRuntime (const TfLiteRuntime &)=delete
 
void operator= (TfLiteRuntime &&)=delete
 
void operator= (const TfLiteRuntime &)=delete
 

Static Public Member Functions

static CreateResult create (std::vector< int8_t > &&model_data, std::string_view delegate_serialization_dir, std::string_view model_token, FloatTensorFormat model_input_format, FloatTensorFormat model_output_format, TfLiteLogWarningCallback log_warning_callback, TfLiteLogErrorCallback log_error_callback, ProfilingFrame &profiling_frame, NpuConfiguration npu_config, bool enable_npu, std::string skel_library_dir)
 Create a TfLiteRuntime instance.
 

Detailed Description

Helper class that wraps the TFLite C API.

Member Typedef Documentation

◆ CreateResult

Initial value:
tl::expected<std::unique_ptr<TfLiteRuntime>, TfLiteCreateRuntimeError>

Constructor & Destructor Documentation

◆ ~TfLiteRuntime()

TfLiteRuntime::~TfLiteRuntime ( )

◆ TfLiteRuntime() [1/2]

TfLiteRuntime::TfLiteRuntime ( TfLiteRuntime && )
delete

◆ TfLiteRuntime() [2/2]

TfLiteRuntime::TfLiteRuntime ( const TfLiteRuntime & )
delete

Member Function Documentation

◆ create()

tl::expected< std::unique_ptr< TfLiteRuntime >, TfLiteCreateRuntimeError > TfLiteRuntime::create ( std::vector< int8_t > && model_data,
std::string_view delegate_serialization_dir,
std::string_view model_token,
FloatTensorFormat model_input_format,
FloatTensorFormat model_output_format,
TfLiteLogWarningCallback log_warning_callback,
TfLiteLogErrorCallback log_error_callback,
ProfilingFrame & profiling_frame,
NpuConfiguration npu_config,
bool enable_npu,
std::string skel_library_dir )
static, nodiscard

Create a TfLiteRuntime instance.

◆ get_input_shape()

std::span< const int > TfLiteRuntime::get_input_shape ( ) const
nodiscard

◆ get_output_shape()

std::span< const int > TfLiteRuntime::get_output_shape ( ) const
nodiscard

◆ operator=() [1/2]

void TfLiteRuntime::operator= ( const TfLiteRuntime & )
delete

◆ operator=() [2/2]

void TfLiteRuntime::operator= ( TfLiteRuntime && )
delete

◆ run_inference() [1/2]

template<FloatTensorFormat InputFormat, FloatTensorFormat OutputFormat>
tl::expected< FloatTensorBuffer< OutputFormat >, TfLiteRunInferenceError > TfLiteRuntime::run_inference ( FloatTensorBuffer< InputFormat > & input)
inline, nodiscard

◆ run_inference() [2/2]

std::optional< TfLiteRunInferenceError > TfLiteRuntime::run_inference ( std::span< float > input,
std::span< float > output )
nodiscard

Run inference on the model; make sure input and output have the correct number of elements.

Parameters
input — will be modified by input operators; should be in format model_input_format
output — will be modified by output operators; should be in format model_output_format