C API for TensorFlow Lite.
The API leans towards simplicity and uniformity instead of convenience, as most usage will be by language-specific wrappers. It provides largely the same set of functionality as that of the C++ TensorFlow Lite Interpreter API, but is useful for shared libraries where having a stable ABI boundary is important.
Conventions:
* We use the TfLite prefix for everything in the API.
* size_t is used to represent byte sizes of objects that are materialized in the address space of the calling process.
* int is used as an index into arrays.
Usage:
// Create the model and interpreter options.
TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsSetNumThreads(options, 2);

// Create the interpreter.
TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);

// Allocate tensors and populate the input tensor data.
TfLiteInterpreterAllocateTensors(interpreter);
TfLiteTensor* input_tensor = TfLiteInterpreterGetInputTensor(interpreter, 0);
TfLiteTensorCopyFromBuffer(input_tensor, input.data(),
                           input.size() * sizeof(float));

// Execute inference.
TfLiteInterpreterInvoke(interpreter);

// Extract the output tensor data.
const TfLiteTensor* output_tensor = TfLiteInterpreterGetOutputTensor(interpreter, 0);
TfLiteTensorCopyToBuffer(output_tensor, output.data(),
                         output.size() * sizeof(float));

// Dispose of the model and interpreter objects.
TfLiteInterpreterDelete(interpreter);
TfLiteInterpreterOptionsDelete(options);
TfLiteModelDelete(model);
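The example above uses C++ containers (input.data(), input.size()) for brevity. Below is a minimal plain-C sketch of the same flow with status checking added; the model path and element count n are placeholder assumptions:

#include <stddef.h>
#include "tensorflow/lite/c/c_api.h"

int RunOnce(const float* input, float* output, size_t n) {
  // Placeholder path; the model is assumed to take and produce n floats.
  TfLiteModel* model = TfLiteModelCreateFromFile("/path/to/model.tflite");
  if (!model) return 1;
  TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
  TfLiteInterpreterOptionsSetNumThreads(options, 2);
  TfLiteInterpreter* interpreter = TfLiteInterpreterCreate(model, options);
  TfLiteInterpreterOptionsDelete(options);
  TfLiteModelDelete(model);  // Safe once the interpreter has been created.
  if (!interpreter) return 1;
  int ok =
      TfLiteInterpreterAllocateTensors(interpreter) == kTfLiteOk &&
      TfLiteTensorCopyFromBuffer(TfLiteInterpreterGetInputTensor(interpreter, 0),
                                 input, n * sizeof(float)) == kTfLiteOk &&
      TfLiteInterpreterInvoke(interpreter) == kTfLiteOk &&
      TfLiteTensorCopyToBuffer(TfLiteInterpreterGetOutputTensor(interpreter, 0),
                               output, n * sizeof(float)) == kTfLiteOk;
  TfLiteInterpreterDelete(interpreter);
  return ok ? 0 : 1;
}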
Typedefs

typedef struct TfLiteInterpreter TfLiteInterpreter
    TfLiteInterpreter provides inference from a provided model.

typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions
    TfLiteInterpreterOptions allows customized interpreter configuration.

typedef struct TfLiteModel TfLiteModel
    TfLiteModel wraps a loaded TensorFlow Lite model.

typedef struct TfLiteSignatureRunner TfLiteSignatureRunner
    TfLiteSignatureRunner is used to run inference on a signature.

typedef struct TfLiteTensor TfLiteTensor
    A tensor in the interpreter system which is a wrapper around a buffer of data including a dimensionality (or NULL if not currently defined).
Functions

TFL_CAPI_EXPORT const char * TfLiteExtensionApisVersion(void)
    The TensorFlow Lite Extension APIs version.

TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterAllocateTensors(TfLiteInterpreter *interpreter)
    Updates allocations for all tensors, resizing dependent tensors using the specified input tensor dimensionality.

TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterCancel(const TfLiteInterpreter *interpreter)
    Tries to cancel any in-flight invocation.

TFL_CAPI_EXPORT TfLiteInterpreter * TfLiteInterpreterCreate(const TfLiteModel *model, const TfLiteInterpreterOptions *optional_options)
    Returns a new interpreter using the provided model and options, or null on failure.

TFL_CAPI_EXPORT void TfLiteInterpreterDelete(TfLiteInterpreter *interpreter)
    Destroys the interpreter.

TFL_CAPI_EXPORT TfLiteTensor * TfLiteInterpreterGetInputTensor(const TfLiteInterpreter *interpreter, int32_t input_index)
    Returns the tensor associated with the input index.

TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetInputTensorCount(const TfLiteInterpreter *interpreter)
    Returns the number of input tensors associated with the model.

TFL_CAPI_EXPORT const TfLiteTensor * TfLiteInterpreterGetOutputTensor(const TfLiteInterpreter *interpreter, int32_t output_index)
    Returns the tensor associated with the output index.

TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetOutputTensorCount(const TfLiteInterpreter *interpreter)
    Returns the number of output tensors associated with the model.

TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetSignatureCount(const TfLiteInterpreter *interpreter)
    Returns the number of signatures defined in the model.

TFL_CAPI_EXPORT const char * TfLiteInterpreterGetSignatureKey(const TfLiteInterpreter *interpreter, int32_t signature_index)
    Returns the key of the Nth signature in the model, where N is specified as signature_index.

TFL_CAPI_EXPORT TfLiteSignatureRunner * TfLiteInterpreterGetSignatureRunner(const TfLiteInterpreter *interpreter, const char *signature_key)
    Returns a new signature runner using the provided interpreter and signature key, or nullptr on failure.

TFL_CAPI_EXPORT TfLiteTensor * TfLiteInterpreterGetTensor(const TfLiteInterpreter *interpreter, int index)
    Returns modifiable access to the tensor that corresponds to the specified index and is associated with the provided interpreter.

TFL_CAPI_EXPORT const int * TfLiteInterpreterInputTensorIndices(const TfLiteInterpreter *interpreter)
    Returns a pointer to an array of input tensor indices.

TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterInvoke(TfLiteInterpreter *interpreter)
    Runs inference for the loaded graph.

TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddDelegate(TfLiteInterpreterOptions *options, TfLiteOpaqueDelegate *delegate)
    Adds a delegate to be applied during TfLiteInterpreter creation.

TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddRegistrationExternal(TfLiteInterpreterOptions *options, TfLiteRegistrationExternal *registration)
    Adds an op registration to be applied during TfLiteInterpreter creation.

TFL_CAPI_EXPORT TfLiteInterpreterOptions * TfLiteInterpreterOptionsCopy(const TfLiteInterpreterOptions *from)
    Creates and returns a shallow copy of an options object.

TFL_CAPI_EXPORT TfLiteInterpreterOptions * TfLiteInterpreterOptionsCreate()
    Returns a new interpreter options instance.

TFL_CAPI_EXPORT void TfLiteInterpreterOptionsDelete(TfLiteInterpreterOptions *options)
    Destroys the interpreter options instance.

TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterOptionsEnableCancellation(TfLiteInterpreterOptions *options, bool enable)
    Enables users to cancel in-flight invocations with TfLiteInterpreterCancel.

TFL_CAPI_EXPORT void TfLiteInterpreterOptionsSetErrorReporter(TfLiteInterpreterOptions *options, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data)
    Sets a custom error reporter for interpreter execution.

TFL_CAPI_EXPORT void TfLiteInterpreterOptionsSetNumThreads(TfLiteInterpreterOptions *options, int32_t num_threads)
    Sets the number of CPU threads to use for the interpreter.

TFL_CAPI_EXPORT const int * TfLiteInterpreterOutputTensorIndices(const TfLiteInterpreter *interpreter)
    Returns a pointer to an array of output tensor indices.

TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterResizeInputTensor(TfLiteInterpreter *interpreter, int32_t input_index, const int *input_dims, int32_t input_dims_size)
    Resizes the specified input tensor.

TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreate(const void *model_data, size_t model_size)
    Returns a model from the provided buffer, or null on failure.

TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateFromFile(const char *model_path)
    Returns a model from the provided file, or null on failure.

TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateFromFileWithErrorReporter(const char *model_path, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data)
    Same as TfLiteModelCreateFromFile with a customizable error reporter.

TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateWithErrorReporter(const void *model_data, size_t model_size, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data)
    Same as TfLiteModelCreate with a customizable error reporter.

TFL_CAPI_EXPORT void TfLiteModelDelete(TfLiteModel *model)
    Destroys the model instance.

TFL_CAPI_EXPORT int TfLiteSchemaVersion(void)
    The supported TensorFlow Lite model file Schema version.

TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerAllocateTensors(TfLiteSignatureRunner *signature_runner)
    Updates allocations for tensors associated with a signature and resizes dependent tensors using the specified input tensor dimensionality.

TFL_CAPI_EXPORT void TfLiteSignatureRunnerDelete(TfLiteSignatureRunner *signature_runner)
    Destroys the signature runner.

TFL_CAPI_EXPORT size_t TfLiteSignatureRunnerGetInputCount(const TfLiteSignatureRunner *signature_runner)
    Returns the number of inputs associated with a signature.

TFL_CAPI_EXPORT const char * TfLiteSignatureRunnerGetInputName(const TfLiteSignatureRunner *signature_runner, int32_t input_index)
    Returns the (null-terminated) name of the Nth input in a signature, where N is specified as input_index.

TFL_CAPI_EXPORT TfLiteTensor * TfLiteSignatureRunnerGetInputTensor(TfLiteSignatureRunner *signature_runner, const char *input_name)
    Returns the input tensor identified by input_name in the given signature.

TFL_CAPI_EXPORT size_t TfLiteSignatureRunnerGetOutputCount(const TfLiteSignatureRunner *signature_runner)
    Returns the number of output tensors associated with the signature.

TFL_CAPI_EXPORT const char * TfLiteSignatureRunnerGetOutputName(const TfLiteSignatureRunner *signature_runner, int32_t output_index)
    Returns the (null-terminated) name of the Nth output in a signature, where N is specified as output_index.

TFL_CAPI_EXPORT const TfLiteTensor * TfLiteSignatureRunnerGetOutputTensor(const TfLiteSignatureRunner *signature_runner, const char *output_name)
    Returns the output tensor identified by output_name in the given signature.

TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerInvoke(TfLiteSignatureRunner *signature_runner)
    Runs inference on a given signature.

TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerResizeInputTensor(TfLiteSignatureRunner *signature_runner, const char *input_name, const int *input_dims, int32_t input_dims_size)
    Resizes the input tensor identified as input_name to be the dimensions specified by input_dims and input_dims_size.

TFL_CAPI_EXPORT size_t TfLiteTensorByteSize(const TfLiteTensor *tensor)
    Returns the size of the underlying data in bytes.

TFL_CAPI_EXPORT TfLiteStatus TfLiteTensorCopyFromBuffer(TfLiteTensor *tensor, const void *input_data, size_t input_data_size)
    Copies from the provided input buffer into the tensor's buffer.

TFL_CAPI_EXPORT TfLiteStatus TfLiteTensorCopyToBuffer(const TfLiteTensor *output_tensor, void *output_data, size_t output_data_size)
    Copies to the provided output buffer from the tensor's buffer.

TFL_CAPI_EXPORT void * TfLiteTensorData(const TfLiteTensor *tensor)
    Returns a pointer to the underlying data buffer.

TFL_CAPI_EXPORT int32_t TfLiteTensorDim(const TfLiteTensor *tensor, int32_t dim_index)
    Returns the length of the tensor in the "dim_index" dimension.

TFL_CAPI_EXPORT const char * TfLiteTensorName(const TfLiteTensor *tensor)
    Returns the (null-terminated) name of the tensor.

TFL_CAPI_EXPORT int32_t TfLiteTensorNumDims(const TfLiteTensor *tensor)
    Returns the number of dimensions that the tensor has.

TFL_CAPI_EXPORT TfLiteQuantizationParams TfLiteTensorQuantizationParams(const TfLiteTensor *tensor)
    Returns the parameters for asymmetric quantization.

TFL_CAPI_EXPORT TfLiteType TfLiteTensorType(const TfLiteTensor *tensor)
    Returns the type of a tensor element.

TFL_CAPI_EXPORT const char * TfLiteVersion(void)
    The TensorFlow Lite Runtime version.
typedef struct TfLiteInterpreter TfLiteInterpreter
    TfLiteInterpreter provides inference from a provided model.

typedef struct TfLiteInterpreterOptions TfLiteInterpreterOptions
    TfLiteInterpreterOptions allows customized interpreter configuration.

typedef struct TfLiteSignatureRunner TfLiteSignatureRunner
    TfLiteSignatureRunner is used to run inference on a signature.
    Note: A signature is used to define a computation in a TF model. A model can have multiple signatures. Each signature contains three components: a signature key (a unique string identifying the signature), its named inputs, and its named outputs. To learn more about signatures in TFLite, refer to: https://www.tensorflow.org/lite/guide/signatures
    Using the TfLiteSignatureRunner, for a particular signature, you can set its inputs, invoke (i.e. execute) the computation, and retrieve its outputs.

typedef struct TfLiteTensor TfLiteTensor
    A tensor in the interpreter system which is a wrapper around a buffer of data including a dimensionality (or NULL if not currently defined).
TFL_CAPI_EXPORT const char * TfLiteExtensionApisVersion( void )
The TensorFlow Lite Extension APIs version.
Returns a pointer to a statically allocated string that is the version number of the TF Lite Extension APIs supported by the (potentially dynamically loaded) TF Lite Runtime library. The TF Lite "Extension APIs" are the APIs for extending TF Lite with custom ops and delegates; this version number covers the (non-experimental) functionality documented in the corresponding extension API header files.
This version number uses semantic versioning, and the return value should be in semver 2 format (see http://semver.org), starting with MAJOR.MINOR.PATCH, e.g. "2.14.0" or "2.15.0-rc2".
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterAllocateTensors( TfLiteInterpreter *interpreter )
Updates allocations for all tensors, resizing dependent tensors using the specified input tensor dimensionality.
This is a relatively expensive operation, and need only be called after creating the graph and/or resizing any inputs.
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterCancel( const TfLiteInterpreter *interpreter )
Tries to cancel any in-flight invocation.
Returns kTfLiteError if cancellation is not enabled via TfLiteInterpreterOptionsEnableCancellation.
TFL_CAPI_EXPORT TfLiteInterpreter * TfLiteInterpreterCreate( const TfLiteModel *model, const TfLiteInterpreterOptions *optional_options )
Returns a new interpreter using the provided model and options, or null on failure.
model must be a valid model instance. The caller retains ownership of the object, and may destroy it (via TfLiteModelDelete) immediately after creating the interpreter. However, if the TfLiteModel was allocated with TfLiteModelCreate, then the model_data buffer that was passed to TfLiteModelCreate must outlive the lifetime of the TfLiteInterpreter object that this function returns, and must not be modified during that time; and if the TfLiteModel was allocated with TfLiteModelCreateFromFile, then the contents of the model file must not be modified during the lifetime of the TfLiteInterpreter object that this function returns.

optional_options may be null. The caller retains ownership of the object, and can safely destroy it (via TfLiteInterpreterOptionsDelete) immediately after creating the interpreter.
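As a sketch of the buffer-ownership rule above: with TfLiteModelCreate, the TfLiteModel object itself may be deleted immediately, but the backing buffer must outlive the interpreter (the helper function name here is hypothetical):

#include "tensorflow/lite/c/c_api.h"

TfLiteInterpreter* CreateFromBuffer(const void* model_data, size_t model_size) {
  // model_data must stay valid and unmodified for as long as the returned
  // interpreter is alive; the TfLiteModel handle may be freed right away.
  TfLiteModel* model = TfLiteModelCreate(model_data, model_size);
  if (!model) return NULL;
  TfLiteInterpreter* interpreter =
      TfLiteInterpreterCreate(model, /*optional_options=*/NULL);
  TfLiteModelDelete(model);  // OK: the interpreter references model_data, not the handle.
  return interpreter;
}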
TFL_CAPI_EXPORT void TfLiteInterpreterDelete( TfLiteInterpreter *interpreter )
Destroys the interpreter.
TFL_CAPI_EXPORT TfLiteTensor * TfLiteInterpreterGetInputTensor( const TfLiteInterpreter *interpreter, int32_t input_index )
Returns the tensor associated with the input index.
REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)
TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetInputTensorCount( const TfLiteInterpreter *interpreter )
Returns the number of input tensors associated with the model.
TFL_CAPI_EXPORT const TfLiteTensor * TfLiteInterpreterGetOutputTensor( const TfLiteInterpreter *interpreter, int32_t output_index )
Returns the tensor associated with the output index.
REQUIRES: 0 <= output_index < TfLiteInterpreterGetOutputTensorCount(interpreter)
TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetOutputTensorCount( const TfLiteInterpreter *interpreter )
Returns the number of output tensors associated with the model.
TFL_CAPI_EXPORT int32_t TfLiteInterpreterGetSignatureCount( const TfLiteInterpreter *interpreter )
Returns the number of signatures defined in the model.
You can run inference by either:
(i) (recommended) using the Interpreter to initialize SignatureRunner(s) and then only using the SignatureRunner APIs, or
(ii) only using the Interpreter APIs.
NOTE: Only use one of the two options above to run inference, i.e. avoid mixing SignatureRunner APIs and Interpreter APIs on the same interpreter, as they share the same underlying data and mixing them can lead to undesirable behavior.
TFL_CAPI_EXPORT const char * TfLiteInterpreterGetSignatureKey( const TfLiteInterpreter *interpreter, int32_t signature_index )
Returns the key of the Nth signature in the model, where N is specified as signature_index.
NOTE: The lifetime of the returned key is the same as (and depends on) the lifetime of interpreter.
TFL_CAPI_EXPORT TfLiteSignatureRunner * TfLiteInterpreterGetSignatureRunner( const TfLiteInterpreter *interpreter, const char *signature_key )
Returns a new signature runner using the provided interpreter and signature key, or nullptr on failure.
NOTE: signature_key is a null-terminated C string that must match the key of a signature in the interpreter's model.
NOTE: The returned signature runner should be destroyed, by calling TfLiteSignatureRunnerDelete(), before the interpreter is destroyed.
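Below is a sketch of the full signature-runner workflow; the signature key "serving_default" and the tensor names "x" and "y" are placeholder assumptions that depend on the model:

#include "tensorflow/lite/c/c_api.h"

int RunSignature(TfLiteInterpreter* interpreter, const float* in, float* out, size_t n) {
  TfLiteSignatureRunner* runner =
      TfLiteInterpreterGetSignatureRunner(interpreter, "serving_default");
  if (!runner) return 1;
  TfLiteTensor* input = TfLiteSignatureRunnerGetInputTensor(runner, "x");  // hypothetical name
  const TfLiteTensor* output = NULL;
  int ok = input != NULL &&
      TfLiteSignatureRunnerAllocateTensors(runner) == kTfLiteOk &&
      TfLiteTensorCopyFromBuffer(input, in, n * sizeof(float)) == kTfLiteOk &&
      TfLiteSignatureRunnerInvoke(runner) == kTfLiteOk &&
      (output = TfLiteSignatureRunnerGetOutputTensor(runner, "y")) != NULL &&  // hypothetical name
      TfLiteTensorCopyToBuffer(output, out, n * sizeof(float)) == kTfLiteOk;
  TfLiteSignatureRunnerDelete(runner);  // Destroy before the interpreter, per the note above.
  return ok ? 0 : 1;
}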
TFL_CAPI_EXPORT TfLiteTensor * TfLiteInterpreterGetTensor( const TfLiteInterpreter *interpreter, int index )
Returns modifiable access to the tensor that corresponds to the specified index and is associated with the provided interpreter.
This requires the index to be between 0 and N - 1, where N is the number of tensors in the model.
Typically the tensors associated with the interpreter would be set during the interpreter initialization, through a mechanism like the InterpreterBuilder, and remain unchanged throughout the lifetime of the interpreter. However, there are some circumstances in which the pointer may not remain valid throughout the lifetime of the interpreter, because calls to AddTensors on the interpreter invalidate the returned pointer.
Note the difference between this function and TfLiteInterpreterGetInputTensor (or TfLiteInterpreterGetOutputTensor for that matter): TfLiteInterpreterGetTensor takes an index into the array of all tensors associated with the interpreter's model, whereas TfLiteInterpreterGetInputTensor takes an index into the array of input tensors.
The ownership of the tensor remains with the TFLite runtime, meaning the caller should not deallocate the pointer.
TFL_CAPI_EXPORT const int * TfLiteInterpreterInputTensorIndices( const TfLiteInterpreter *interpreter )
Returns a pointer to an array of input tensor indices.
The length of the array can be obtained via a call to TfLiteInterpreterGetInputTensorCount.
Typically the input tensors associated with an interpreter would be set during the initialization of the interpreter, through a mechanism like the InterpreterBuilder, and remain unchanged throughout the lifetime of the interpreter. However, there are some circumstances in which the pointer may not remain valid throughout the lifetime of the interpreter, because calls to SetInputs on the interpreter invalidate the returned pointer.
The ownership of the array remains with the TFLite runtime.
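A sketch of walking the inputs via this index array, pairing it with TfLiteInterpreterGetTensor (which indexes the array of all tensors, as noted above):

#include <stdio.h>
#include "tensorflow/lite/c/c_api.h"

void PrintInputs(const TfLiteInterpreter* interpreter) {
  const int* indices = TfLiteInterpreterInputTensorIndices(interpreter);
  int32_t count = TfLiteInterpreterGetInputTensorCount(interpreter);
  for (int32_t i = 0; i < count; ++i) {
    // Each entry is an index into the array of all tensors in the model.
    TfLiteTensor* t = TfLiteInterpreterGetTensor(interpreter, indices[i]);
    printf("input %d -> tensor #%d (%s)\n", (int)i, indices[i], TfLiteTensorName(t));
  }
}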
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterInvoke( TfLiteInterpreter *interpreter )
Runs inference for the loaded graph.
Before calling this function, the caller should first invoke TfLiteInterpreterAllocateTensors() and should also set the values for the input tensors. After successfully calling this function, the values for the output tensors will be set.
If the (experimental!) delegate fallback option was enabled in the interpreter options, then the interpreter will automatically fall back to not using any delegates if execution with delegates fails. For details, see TfLiteInterpreterOptionsSetEnableDelegateFallback in c_api_experimental.h.
Returns kTfLiteOk on success, or an error status code otherwise (e.g. kTfLiteError, or kTfLiteDelegateError if execution with a delegate failed); on failure, the output tensor values are not valid.
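A minimal status-checking pattern for an invocation (a sketch; error handling is application-specific):

TfLiteStatus status = TfLiteInterpreterInvoke(interpreter);
if (status != kTfLiteOk) {
  // Output tensor values are not valid; details go to the error reporter.
  // Handle or propagate the failure here.
}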
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddDelegate( TfLiteInterpreterOptions *options, TfLiteOpaqueDelegate *delegate )
Adds a delegate to be applied during TfLiteInterpreter creation.
If delegate application fails, interpreter creation will also fail, with an associated error logged.
If you are NOT using "TensorFlow Lite in Play Services", and NOT building with the TFLITE_WITH_STABLE_ABI or TFLITE_USE_OPAQUE_DELEGATE macros enabled, it is possible to pass a TfLiteDelegate* rather than a TfLiteOpaqueDelegate* to this function, since in those cases TfLiteOpaqueDelegate is just a typedef alias for TfLiteDelegate. This is for compatibility with existing source code and existing delegates. For new delegates, it is recommended to use TfLiteOpaqueDelegate rather than TfLiteDelegate. (See TfLiteOpaqueDelegate in tensorflow/lite/core/c/c_api_types.h.)
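As an illustration, a sketch of attaching the XNNPACK delegate, assuming a build where TfLiteOpaqueDelegate aliases TfLiteDelegate as described above (the header path follows the TensorFlow source tree):

#include "tensorflow/lite/c/c_api.h"
#include "tensorflow/lite/delegates/xnnpack/xnnpack_delegate.h"

TfLiteXNNPackDelegateOptions xnn_options = TfLiteXNNPackDelegateOptionsDefault();
TfLiteDelegate* delegate = TfLiteXNNPackDelegateCreate(&xnn_options);
TfLiteInterpreterOptionsAddDelegate(options, delegate);
// ... create and use the interpreter with these options ...
// The delegate must outlive the interpreter; free it only afterwards:
// TfLiteXNNPackDelegateDelete(delegate);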
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsAddRegistrationExternal( TfLiteInterpreterOptions *options, TfLiteRegistrationExternal *registration )
Adds an op registration to be applied during TfLiteInterpreter creation.
The TfLiteRegistrationExternal object is needed to implement custom ops for the TFLite Interpreter via the C API. Calling this function ensures that any TfLiteInterpreter created with the specified options can execute models that use the custom operator specified in registration. See https://www.tensorflow.org/lite/guide/ops_custom for details on custom op support. This is an experimental API and subject to change.
TFL_CAPI_EXPORT TfLiteInterpreterOptions * TfLiteInterpreterOptionsCopy( const TfLiteInterpreterOptions *from )
Creates and returns a shallow copy of an options object.
The caller is responsible for calling TfLiteInterpreterOptionsDelete to deallocate the object pointed to by the returned pointer.
TFL_CAPI_EXPORT TfLiteInterpreterOptions * TfLiteInterpreterOptionsCreate()
Returns a new interpreter options instance.
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsDelete( TfLiteInterpreterOptions *options )
Destroys the interpreter options instance.
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterOptionsEnableCancellation( TfLiteInterpreterOptions *options, bool enable )
Enables users to cancel in-flight invocations with TfLiteInterpreterCancel.
By default this is disabled, and calls to TfLiteInterpreterCancel will return kTfLiteError. See TfLiteInterpreterCancel.
TFL_CAPI_EXPORT void TfLiteInterpreterOptionsSetErrorReporter( TfLiteInterpreterOptions *options, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data )
Sets a custom error reporter for interpreter execution.
reporter takes the provided user_data object, as well as a C-style format string and arg list (see also vprintf).
user_data is optional. If non-null, it is owned by the client and must remain valid for the duration of the interpreter lifetime.

TFL_CAPI_EXPORT void TfLiteInterpreterOptionsSetNumThreads( TfLiteInterpreterOptions *options, int32_t num_threads )
Sets the number of CPU threads to use for the interpreter.
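A sketch combining the two setters above, with a reporter (a hypothetical name) that forwards messages to stderr:

#include <stdarg.h>
#include <stdio.h>
#include "tensorflow/lite/c/c_api.h"

static void StderrReporter(void* user_data, const char* format, va_list args) {
  (void)user_data;  // Unused here; may carry client state if needed.
  vfprintf(stderr, format, args);
  fputc('\n', stderr);
}

// ...
TfLiteInterpreterOptions* options = TfLiteInterpreterOptionsCreate();
TfLiteInterpreterOptionsSetErrorReporter(options, StderrReporter, /*user_data=*/NULL);
TfLiteInterpreterOptionsSetNumThreads(options, 4);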
TFL_CAPI_EXPORT const int * TfLiteInterpreterOutputTensorIndices( const TfLiteInterpreter *interpreter )
Returns a pointer to an array of output tensor indices.
The length of the array can be obtained via a call to TfLiteInterpreterGetOutputTensorCount.
Typically the output tensors associated with an interpreter would be set during the initialization of the interpreter, through a mechanism like the InterpreterBuilder, and remain unchanged throughout the lifetime of the interpreter. However, there are some circumstances in which the pointer may not remain valid throughout the lifetime of the interpreter, because calls to SetOutputs on the interpreter invalidate the returned pointer.
The ownership of the array remains with the TFLite runtime.
TFL_CAPI_EXPORT TfLiteStatus TfLiteInterpreterResizeInputTensor( TfLiteInterpreter *interpreter, int32_t input_index, const int *input_dims, int32_t input_dims_size )
Resizes the specified input tensor.
REQUIRES: 0 <= input_index < TfLiteInterpreterGetInputTensorCount(interpreter)
This function makes a copy of the input dimensions, so the client can safely deallocate input_dims immediately after this function returns.
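A sketch of the resize-then-reallocate sequence (the shape [4, 224, 224, 3] is a placeholder assumption):

const int dims[4] = {4, 224, 224, 3};  // hypothetical new input shape
if (TfLiteInterpreterResizeInputTensor(interpreter, 0, dims, 4) == kTfLiteOk &&
    TfLiteInterpreterAllocateTensors(interpreter) == kTfLiteOk) {
  // The input tensor buffer now reflects the new shape; safe to copy data in.
}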
TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreate( const void *model_data, size_t model_size )
Returns a model from the provided buffer, or null on failure.
TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateFromFile( const char *model_path )
Returns a model from the provided file, or null on failure.
TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateFromFileWithErrorReporter( const char *model_path, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data )
Same as TfLiteModelCreateFromFile with a customizable error reporter.
reporter takes the provided user_data object, as well as a C-style format string and arg list (see also vprintf).
user_data is optional. If non-null, it is owned by the client and must remain valid for the duration of the interpreter lifetime.

TFL_CAPI_EXPORT TfLiteModel * TfLiteModelCreateWithErrorReporter( const void *model_data, size_t model_size, void(*)(void *user_data, const char *format, va_list args) reporter, void *user_data )
Same as TfLiteModelCreate with a customizable error reporter.
reporter takes the provided user_data object, as well as a C-style format string and arg list (see also vprintf).
user_data is optional. If non-null, it is owned by the client and must remain valid for the duration of the interpreter lifetime.

TFL_CAPI_EXPORT void TfLiteModelDelete( TfLiteModel *model )
Destroys the model instance.
TFL_CAPI_EXPORT int TfLiteSchemaVersion( void )
The supported TensorFlow Lite model file Schema version.
Returns the (major) version number of the Schema used for model files that is supported by the (potentially dynamically loaded) TensorFlow Lite Runtime.
Model files using schema versions different from this may not be supported by the current version of the TF Lite Runtime.
TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerAllocateTensors( TfLiteSignatureRunner *signature_runner )
Updates allocations for tensors associated with a signature and resizes dependent tensors using the specified input tensor dimensionality.
This is a relatively expensive operation and hence should only be called after initializing the signature runner object and/or resizing any inputs.
TFL_CAPI_EXPORT void TfLiteSignatureRunnerDelete( TfLiteSignatureRunner *signature_runner )
Destroys the signature runner.
TFL_CAPI_EXPORT size_t TfLiteSignatureRunnerGetInputCount( const TfLiteSignatureRunner *signature_runner )
Returns the number of inputs associated with a signature.
TFL_CAPI_EXPORT const char * TfLiteSignatureRunnerGetInputName( const TfLiteSignatureRunner *signature_runner, int32_t input_index )
Returns the (null-terminated) name of the Nth input in a signature, where N is specified as input_index.
NOTE: The lifetime of the returned name is the same as (and depends on) the lifetime of signature_runner.
TFL_CAPI_EXPORT TfLiteTensor * TfLiteSignatureRunnerGetInputTensor( TfLiteSignatureRunner *signature_runner, const char *input_name )
Returns the input tensor identified by input_name in the given signature. Returns nullptr if the given name is not valid.
NOTE: The lifetime of the returned tensor is the same as (and depends on) the lifetime of signature_runner.
TFL_CAPI_EXPORT size_t TfLiteSignatureRunnerGetOutputCount( const TfLiteSignatureRunner *signature_runner )
Returns the number of output tensors associated with the signature.
TFL_CAPI_EXPORT const char * TfLiteSignatureRunnerGetOutputName( const TfLiteSignatureRunner *signature_runner, int32_t output_index )
Returns the (null-terminated) name of the Nth output in a signature, where N is specified as output_index.
NOTE: The lifetime of the returned name is the same as (and depends on) the lifetime of signature_runner.
TFL_CAPI_EXPORT const TfLiteTensor * TfLiteSignatureRunnerGetOutputTensor( const TfLiteSignatureRunner *signature_runner, const char *output_name )
Returns the output tensor identified by output_name in the given signature. Returns nullptr if the given name is not valid.
NOTE: The lifetime of the returned tensor is the same as (and depends on) the lifetime of signature_runner.
TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerInvoke( TfLiteSignatureRunner *signature_runner )
Runs inference on a given signature.
Before calling this function, the caller should first invoke TfLiteSignatureRunnerAllocateTensors() and should also set the values for the input tensors. After successfully calling this function, the values for the output tensors will be set.
TFL_CAPI_EXPORT TfLiteStatus TfLiteSignatureRunnerResizeInputTensor( TfLiteSignatureRunner *signature_runner, const char *input_name, const int *input_dims, int32_t input_dims_size )
Resizes the input tensor identified as input_name to be the dimensions specified by input_dims and input_dims_size.
Only unknown dimensions can be resized with this function. Unknown dimensions are indicated as -1 in the dims_signature attribute of a TfLiteTensor.
Returns the status of success or failure. Note that this doesn't actually resize any existing buffers; a call to TfLiteSignatureRunnerAllocateTensors() is required to change the tensor input buffer.
NOTE: This function is similar to TfLiteInterpreterResizeInputTensorStrict() and not TfLiteInterpreterResizeInputTensor().
NOTE: input_name must match the name of an input in the signature.
NOTE: This function makes a copy of the input dimensions, so the caller can safely deallocate input_dims immediately after this function returns.
TFL_CAPI_EXPORT size_t TfLiteTensorByteSize( const TfLiteTensor *tensor )
Returns the size of the underlying data in bytes.
TFL_CAPI_EXPORT TfLiteStatus TfLiteTensorCopyFromBuffer( TfLiteTensor *tensor, const void *input_data, size_t input_data_size )
Copies from the provided input buffer into the tensor's buffer.
REQUIRES: input_data_size == TfLiteTensorByteSize(tensor)
TFL_CAPI_EXPORT TfLiteStatus TfLiteTensorCopyToBuffer( const TfLiteTensor *output_tensor, void *output_data, size_t output_data_size )
Copies to the provided output buffer from the tensor's buffer.
REQUIRES: output_data_size == TfLiteTensorByteSize(tensor)
TFL_CAPI_EXPORT void * TfLiteTensorData( const TfLiteTensor *tensor )
Returns a pointer to the underlying data buffer.
TFL_CAPI_EXPORT int32_t TfLiteTensorDim( const TfLiteTensor *tensor, int32_t dim_index )
Returns the length of the tensor in the "dim_index" dimension.
REQUIRES: 0 <= dim_index < TfLiteTensorNumDims(tensor)
TFL_CAPI_EXPORT const char * TfLiteTensorName( const TfLiteTensor *tensor )
Returns the (null-terminated) name of the tensor.
TFL_CAPI_EXPORT int32_t TfLiteTensorNumDims( const TfLiteTensor *tensor )
Returns the number of dimensions that the tensor has.
Returns -1 in case the tensor does not have its dimensions property set.
TFL_CAPI_EXPORT TfLiteQuantizationParams TfLiteTensorQuantizationParams( const TfLiteTensor *tensor )
Returns the parameters for asymmetric quantization.
The quantization parameters are only valid when the tensor type is kTfLiteUInt8 and the scale != 0. Quantized values can be converted back to float using:

real_value = scale * (quantized_value - zero_point);
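A sketch applying this conversion to a quantized uint8 tensor's data:

#include <stdint.h>

TfLiteQuantizationParams params = TfLiteTensorQuantizationParams(tensor);
if (TfLiteTensorType(tensor) == kTfLiteUInt8 && params.scale != 0.0f) {
  const uint8_t* data = (const uint8_t*)TfLiteTensorData(tensor);
  size_t count = TfLiteTensorByteSize(tensor);  // One byte per kTfLiteUInt8 element.
  for (size_t i = 0; i < count; ++i) {
    float real_value = params.scale * ((int32_t)data[i] - params.zero_point);
    // ... use real_value ...
  }
}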
TFL_CAPI_EXPORT TfLiteType TfLiteTensorType( const TfLiteTensor *tensor )
Returns the type of a tensor element.
TFL_CAPI_EXPORT const char * TfLiteVersion( void )
The TensorFlow Lite Runtime version.
Returns a pointer to a statically allocated string that is the version number of the (potentially dynamically loaded) TF Lite Runtime library. TensorFlow Lite uses semantic versioning, and the return value should be in semver 2 format http://semver.org, starting with MAJOR.MINOR.PATCH, e.g. "2.12.0" or "2.13.0-rc2".