* limitations under the License.
*/
+/**
+ * @file NeuralNetworksEx.h
+ * @brief This file contains ANeuralNetworksModel_addOperationEx function definition
+ * @ingroup COM_AI_RUNTIME
+ */
#ifndef NN_RUNTIME_NEURAL_NETWORKS_EX_H
#define NN_RUNTIME_NEURAL_NETWORKS_EX_H
__BEGIN_DECLS
+/**
+ * @brief Extended operation types
+ */
typedef enum {
/** extends operation. */
- ANEURALNETWORKS_CAST_EX = 50000,
- ANEURALNETWORKS_GATHER_EX = 50001,
- ANEURALNETWORKS_TOPK_V2_EX = 50002,
- ANEURALNETWORKS_TENSORFLOW_MAX_EX = 50003,
- ANEURALNETWORKS_SPLIT_EX = 50004,
- ANEURALNETWORKS_RSQRT_EX = 50005,
- ANEURALNETWORKS_SQUARED_DIFFERENCE_EX = 50006,
+ ANEURALNETWORKS_CAST_EX = 50000, /**< Casts a tensor to a new type */
+ ANEURALNETWORKS_GATHER_EX = 50001, /**< Gather slices according to indexes and axis */
+ ANEURALNETWORKS_TOPK_V2_EX = 50002, /**< Find values and indices of the k largest elements */
+ ANEURALNETWORKS_TENSORFLOW_MAX_EX = 50003, /**< Computes the maximum of elements across dimensions of a tensor */
+ ANEURALNETWORKS_SPLIT_EX = 50004, /**< Splits a tensor into sub tensors */
+ ANEURALNETWORKS_RSQRT_EX = 50005, /**< Computes reciprocal of square root of x element-wise */
+ ANEURALNETWORKS_SQUARED_DIFFERENCE_EX = 50006, /**< Returns (x-y)(x-y) element-wise */
ANEURALNETWORKS_NEG_EX = 50007,
ANEURALNETWORKS_EXP_EX = 50008,
ANEURALNETWORKS_TENSORFLOW_SUM_EX = 50009,
typedef OperationCodeEx ANeuralNetworksOperationTypeEx;
/**
- * Add an extended operation to a model.
- *
- * @param model The model to be modified.
- * @param type The type of extended operation.
- * @param inputCount The number of entries in the inputs array.
- * @param inputs An array of indexes identifying each operand.
- * @param outputCount The number of entries in the outputs array.
- * @param outputs An array of indexes identifying each operand.
- *
- * The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
+ * @brief Add an extended operation to a model.
*
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
- * called will return an error.
+ * @param[in] model The model to be modified.
+ * @param[in] type The type of extended operation.
+ * @param[in] inputCount The number of entries in the inputs array.
+ * @param[in] inputs An array of indexes identifying each operand.
+ * @param[in] outputCount The number of entries in the outputs array.
+ * @param[in] outputs An array of indexes identifying each operand.
*
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
+ * @note The operands specified by inputs and outputs must have been
+ * previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n
+ * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has been
+ * called will return an error.\n
+ * See {@link ANeuralNetworksModel} for information on multithreaded usage.
*
* @return ANEURALNETWORKS_NO_ERROR if successful.
*/
* limitations under the License.
*/
+/**
+ * @file NeuralNetworksExShim.h
+ * @brief This file contains an actual implementation of ANeuralNetworksModel_addOperationEx function
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef NN_API_EX_SHIM_H
#define NN_API_EX_SHIM_H
const uint32_t *outputs);
/**
- * Add an extended operation to a model.
- *
- * @param model The model to be modified.
- * @param type The type of extended operation.
- * @param inputCount The number of entries in the inputs array.
- * @param inputs An array of indexes identifying each operand.
- * @param outputCount The number of entries in the outputs array.
- * @param outputs An array of indexes identifying each operand.
- *
- * The operands specified by inputs and outputs must have been
- * previously added by calls to {@link ANeuralNetworksModel_addOperand}.
+ * @brief Add an extended operation to a model.
*
- * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
- * been
- * called will return an error.
+ * @param[in] model The model to be modified.
+ * @param[in] type The type of extended operation.
+ * @param[in] inputCount The number of entries in the inputs array.
+ * @param[in] inputs An array of indexes identifying each operand.
+ * @param[in] outputCount The number of entries in the outputs array.
+ * @param[in] outputs An array of indexes identifying each operand.
*
- * See {@link ANeuralNetworksModel} for information on multithreaded usage.
+ * @note The operands specified by inputs and outputs must have been
+ * previously added by calls to {@link ANeuralNetworksModel_addOperand}.\n
+ * Attempting to modify a model once {@link ANeuralNetworksModel_finish} has
+ * been
+ * called will return an error.\n
+ * See {@link ANeuralNetworksModel} for information on multithreaded usage.
*
* @return ANEURALNETWORKS_NO_ERROR if successful.
*/
* limitations under the License.
*/
+/**
+ * @file NeuralNetworksLoadHelpers.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains functions to load NN API runtime library
+ */
+
#ifndef __NEURAL_NETWORKS_LOAD_HELPER_H__
#define __NEURAL_NETWORKS_LOAD_HELPER_H__
#include <stdio.h>
#include <stdlib.h>
+/**
+ * @brief Print log data
+ * @param[in] format Format string of @c printf
+ * @param[in] args Argument after format string. (Same with @c printf)
+ */
#define NNAPI_LOG(format, ...) printf(format "\n", __VA_ARGS__);
+
+/**
+ * @brief Create a function pointer named @c fn after loading NN API library
+ * @param[in] name Name of a function
+ */
#define LOAD_FUNCTION(name) \
static name##_fn fn = reinterpret_cast<name##_fn>(loadNNAPIFunction(#name));
+
+/**
+ * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
+ * @param[in] args List of arguments for the function @c fn
+ */
#define EXECUTE_FUNCTION(...) \
if (fn != nullptr) { \
fn(__VA_ARGS__); \
}
+
+/**
+ * @brief Run @c fn function. @c fn is created by @ref LOAD_FUNCTION
+ * @param[in] args List of arguments for the function @c fn
+ * @return the return value of @c fn
+ */
#define EXECUTE_FUNCTION_RETURN(...) return fn != nullptr ? fn(__VA_ARGS__) : 0;
+/**
+ * @brief Load NN API library
+ * @param[in] name path of NN API library
+ * @return a symbol table handle of NN API library
+ */
inline void* loadNNAPILibrary(const char* name) {
// TODO: change RTLD_LOCAL? Assumes there can be multiple instances of nn
// api RT
return handle;
}
+/**
+ * @brief Load libneuralnetworks.so and return handle of library
+ * @return a symbol table handle of NN API library
+ */
inline void* getNNAPILibraryHandle() {
static void* handle = loadNNAPILibrary("libneuralnetworks.so");
return handle;
}
+/**
+ * @brief Return function ptr in libneuralnetworks.so
+ * @param[in] name Name of function
+ * @return function pointer
+ */
inline void* loadNNAPIFunction(const char* name) {
void* fn = nullptr;
if (getNNAPILibraryHandle() != nullptr) {
return fn;
}
+/**
+ * @brief Check if libneuralnetworks.so can be loaded
+ * @return @c true if loading is successful, otherwise @c false.
+ */
inline bool NNAPIExists() {
static bool nnapi_is_available = getNNAPILibraryHandle();
return nnapi_is_available;
+/**
+ * @file memory.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains @c make_unique which is not supported by C++11
+ */
#ifndef __NNFW_STD_MEMORY_H__
#define __NNFW_STD_MEMORY_H__
namespace nnfw
{
-
+/**
+ * @brief Provide @c make_unique function supported from C++14
+ * @param[in] args List of arguments with which an instance of T will be constructed.
+ * @return @c std::unique_ptr of an instance of type T
+ */
template <typename T, typename... Args> std::unique_ptr<T> make_unique(Args &&... args)
{
// NOTE std::make_unique is missing in C++11 standard
* limitations under the License.
*/
+/**
+ * @file Utils.h
+ * @brief This file contains utility functions
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_NNAPI_UTILS_H__
#define __NNFW_SUPPORT_NNAPI_UTILS_H__
namespace nnapi
{
+/**
+ * @brief Converts a PaddingCode to const char*
+ * @param[in] code The PaddingCode to be converted
+ * @return A string holding the converted value
+ */
const char *to_string(const PaddingCode &code);
} // namespace nnapi
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @brief This file contains Reader class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_NNAPI_FEATURE_READER_H__
#define __NNFW_SUPPORT_NNAPI_FEATURE_READER_H__
namespace feature
{
+/**
+ * @brief Class to read value of feature which is inherited from nnfw::util::feature::Reader<T> class
+ */
template<typename T> class Reader : public nnfw::util::feature::Reader<T>
{
public:
+ /**
+ * @brief Construct a new Reader object with base and shape information
+ * @param[in] shape The shape of a feature
+ * @param[in] base The base address of a feature
+ */
Reader(const nnfw::util::feature::Shape &shape, const T *base)
: _shape{shape}, _base{base}
{
}
public:
+ /**
+ * @brief Get the value used by three indexes
+ * @param[in] ch The channel index
+ * @param[in] row The row index
+ * @param[in] col The column index
+ * @return The value at the offset
+ */
T at(uint32_t ch, uint32_t row, uint32_t col) const override
{
return *(_base + indexOf(_shape, ch, row, col));
}
private:
- nnfw::util::feature::Shape _shape;
- const T *_base;
+ nnfw::util::feature::Shape _shape; /**< Shape of feature */
+ const T *_base; /**< Base address of feature */
};
} // namespace feature
* limitations under the License.
*/
+/**
+ * @file Utils.h
+ * @brief This file contains indexOf function
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_NNAPI_FEATURE_UTILS_H__
#define __NNFW_SUPPORT_NNAPI_FEATURE_UTILS_H__
namespace feature
{
+/**
+ * @brief Get index for given parameters
+ * @param[in] shape The shape of feature
+ * @param[in] ch The channel index
+ * @param[in] row The row index
+ * @param[in] col The column index
+ * @return The value index of given parameters
+ */
uint32_t indexOf(const nnfw::util::feature::Shape &shape, uint32_t ch, uint32_t row, uint32_t col);
} // namespace feature
* limitations under the License.
*/
+/**
+ * @file Assert.h
+ * @brief This file contains helper function of assertion
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_ASSERT_H__
#define __NNFW_SUPPORT_TFLITE_ASSERT_H__
* limitations under the License.
*/
+/**
+ * @file Diff.h
+ * @brief This file contains classes for testing correctness of implementation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_COMPARE_H__
#define __NNFW_SUPPORT_TFLITE_COMPARE_H__
#include <functional>
#include <vector>
+/**
+ * @brief Class to define TfLite interpreter match application
+ */
class TfLiteInterpMatchApp
{
public:
+ /**
+ * @brief Construct a new TfLiteInterpMatchApp object with Comparator
+ * @param[in] comparator Comparator object for tensor comparation
+ */
TfLiteInterpMatchApp(const nnfw::util::tensor::Comparator &comparator)
: _verbose{false}, _comparator(comparator)
{
}
public:
+ /**
+ * @brief Get reference verbose for debugging information
+ * @return Reference of verbose value
+ */
int &verbose(void) { return _verbose; }
private:
int _verbose;
public:
+ /**
+ * @brief Run two interpreter and return the output matching
+ * @param[in] pure Interpreter object of expected(with TfLite)
+ * @param[in] nnapi Interpreter object of obtained(through NNAPI)
+ * @return @c true if two Interpreter results are same, otherwise @c false
+ */
bool run(::tflite::Interpreter &pure, ::tflite::Interpreter &nnapi) const;
+ /**
+ * @brief Compare two TensorView values and return the match result
+ * @param[in] expected TensorView object to read expected values
+ * @param[in] obtained TensorView object to read obtained values
+ * @param[in] id Tensor ID value used for debug message
+ * @return @c true if two TensorView values are same, otherwise @c false
+ */
template <typename T>
bool compareSingleTensorView(const nnfw::support::tflite::TensorView<T> &expected,
const nnfw::support::tflite::TensorView<T> &obtained,
#include <random>
+/**
+ * @brief Class to generate random values
+ */
class RandomGenerator
{
public:
+ /**
+ * @brief Construct a new RandomGenerator object
+ * @param[in] seed Random seed value
+ * @param[in] mean Mean value of normal random number generation
+ * @param[in] stddev Standard deviation of random number generation
+ * @param[in] quantization TfLiteQuantizationParams type to represent quantization value
+ * (not used yet)
+ */
RandomGenerator(int seed, float mean, float stddev,
const TfLiteQuantizationParams quantization = make_default_quantization())
: _rand{seed}, _dist{mean, stddev}, _quantization{quantization}
}
public:
+ /**
+ * @brief Generate random numbers for type T
+ * @param[in] s Shape value
+ * @param[in] i Index value
+ * @return Random generated value
+ * @note This is same as T generate(void) as two input parameters are not used
+ */
template <typename T>
T generate(const ::nnfw::util::tensor::Shape &, const ::nnfw::util::tensor::Index &)
{
return generate<T>();
}
+ /**
+ * @brief Generate random numbers for type T
+ * @return Random generated value
+ */
template <typename T> T generate(void)
{
return _dist(_rand);
template <>
uint8_t RandomGenerator::generate<uint8_t>(void);
-// For NNAPI testing
+/**
+ * @brief Structure for NNAPI correctness test
+ */
struct RandomTestParam
{
- int verbose;
- int tolerance;
- int tensor_logging = 0;
- std::string log_path = ""; // meaningful only when tensor_logging is 1
+ int verbose; //!< Verbosity of debug information
+ int tolerance; //!< Tolerance of value difference
+ int tensor_logging = 0; //!< Save logging to a file if not 0
+ std::string log_path = ""; //!< Path of log file, meaningful only when tensor_logging is 1
};
+/**
+ * @brief Class to define Random test runner
+ */
class RandomTestRunner
{
public:
+ /**
+ * @brief Construct a new RandomTestRunner object
+ * @param[in] seed Random seed value
+ * @param[in] param RandomTestParam object for test runner
+ * @param[in] quantization TfLiteQuantizationParams type to represent quantization value
+ */
RandomTestRunner(int seed, const RandomTestParam ¶m,
const TfLiteQuantizationParams quantization = make_default_quantization())
: _randgen{seed, 0.0f, 2.0f, quantization}, _param{param}
}
public:
- // NOTE this method updates '_rand'
- // Return 0 if test succeeds
+ /**
+ * @brief Run the random test runner
+ * @param[in] builder Interpreter Builder used to run
+ * @return 0 if test succeeds, otherwise failure
+ */
int run(const nnfw::support::tflite::interp::Builder &builder);
public:
+ /**
+ * @brief Get RandomGenerator reference
+ * @return RandomGenerator reference
+ */
RandomGenerator &generator() { return _randgen; };
private:
const RandomTestParam _param;
public:
+ /**
+ * @brief Create a RandomTestRunner object
+ * @param[in] seed Random seed value
+ * @return RandomGenerator object
+ */
static RandomTestRunner make(int seed);
};
* limitations under the License.
*/
+/**
+ * @file FeatureView.h
+ * @brief This file contains FeatureView class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_FEATURE_VIEW_H__
#define __NNFW_SUPPORT_TFLITE_FEATURE_VIEW_H__
template<typename T> class FeatureView;
+/**
+ * @brief Class to support reading element of float type feature
+ */
template<> class FeatureView<float> : public nnfw::util::feature::Reader<float>
{
public:
+ /**
+ * @brief Construct a new FeatureView object
+ * @param[in] interp Interpreter to read from
+ * @param[in] index InputIndex index of input
+ */
FeatureView(::tflite::Interpreter &interp, const InputIndex &index);
+ /**
+ * @brief Construct a new FeatureView object
+ * @param[in] interp Interpreter to read from
+ * @param[in] index OutputIndex index of output
+ */
FeatureView(::tflite::Interpreter &interp, const OutputIndex &index);
public:
+ /**
+ * @brief Get value of element using channel, row and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
float at(uint32_t ch, uint32_t row, uint32_t col) const;
+ /**
+ * @brief Get reference of element using channel, row and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
float &at(uint32_t ch, uint32_t row, uint32_t col);
private:
+ /**
+ * @brief Get offset of element from channel, row and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Offset of element
+ */
uint32_t getElementOffset(uint32_t ch, uint32_t row, uint32_t col) const
{
uint32_t res = 0;
* limitations under the License.
*/
+/**
+ * @file InputIndex.h
+ * @brief This file contains InputIndex class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_INPUT_INDEX_H__
#define __NNFW_SUPPORT_TFLITE_INPUT_INDEX_H__
namespace tflite
{
+/**
+ * @brief Class to express index of input
+ */
class InputIndex
{
public:
+ /**
+ * @brief Construct a new InputIndex object with index value
+ * @param[in] index The value of index
+ */
InputIndex(int index) : _index(index)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get index value as int
+ * @return Index value as int
+ */
int asInt(void) const { return _index; }
private:
* limitations under the License.
*/
+/**
+ * @file InterpreterSession.h
+ * @brief This file contains InterpreterSession class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_INTERPRETER_SESSION_H__
#define __NNFW_SUPPORT_TFLITE_INTERPRETER_SESSION_H__
namespace tflite
{
+/**
+ * @brief Class to define TfLite interpreter session which is inherited from Session class
+ */
class InterpreterSession final : public Session
{
public:
+ /**
+ * @brief Construct a InterpreterSession object with interpreter of TfLite
+ * @param[in] interp The TfLite interpreter pointer
+ */
InterpreterSession(::tflite::Interpreter *interp) : _interp{interp}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get TfLite interpreter pointer
+ * @return The TfLite interpreter
+ */
::tflite::Interpreter *interp(void) override { return _interp; }
public:
+ /**
+ * @brief Prepare the TfLite interpreter session
+ * @return @c true if tensor preparation is successful, otherwise @c false
+ */
bool prepare(void) override
{
_interp->UseNNAPI(false);
return true;
}
+ /**
+ * @brief Run the Invoke function of TfLite interpreter
+ * @return @c true if Invoke() is successful, otherwise @c false
+ */
bool run(void) override
{
// Return true if Invoke returns kTfLiteOk
return kTfLiteOk == _interp->Invoke();
}
+ /**
+ * @brief Tear down TfLite interpreter session
+ * @return @c true always
+ */
bool teardown(void) override
{
// Do NOTHING currently
* limitations under the License.
*/
+/**
+ * @file NNAPISession.h
+ * @brief This file contains NNAPISession class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_NNAPI_SESSION_H__
#define __NNFW_SUPPORT_TFLITE_NNAPI_SESSION_H__
namespace tflite
{
+/**
+ * @brief Class to define NNAPI interpreter session which is inherited from Session class
+ */
class NNAPISession final : public Session
{
public:
+ /**
+ * @brief Construct a NNAPISession object with interpreter of TfLite
+ * @param[in] interp The TfLite interpreter pointer
+ * @note Invoke BuildGraph() of NNAPI delegate from Interpreter
+ */
NNAPISession(::tflite::Interpreter *interp) : _interp{interp}
{
// Construct Graph from Interpreter
}
public:
+ /**
+ * @brief Get TfLite interpreter pointer
+ * @return The TfLite interpreter
+ */
::tflite::Interpreter *interp(void) override { return _interp; }
public:
+ /**
+ * @brief Prepare the TfLite interpreter session
+ * @return @c true if tensor preparation is successful, otherwise @c false
+ */
bool prepare(void) override
{
// Explicitly turn off T/F lite internal NNAPI delegation in order to use locally defined
return true;
}
+ /**
+ * @brief Run the Invoke function of NNAPI delegate
+ * @return @c true if Invoke() is successful, otherwise @c false
+ */
bool run(void) override
{
return kTfLiteOk == _delegate.Invoke(_interp);
}
+ /**
+ * @brief Tear down TfLite interpreter session
+ * @return @c true always
+ */
bool teardown(void) override
{
// DO NOTHING
* limitations under the License.
*/
+/**
+ * @file OutputIndex.h
+ * @brief This file contains OutputIndex class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_OUTPUT_INDEX_H__
#define __NNFW_SUPPORT_TFLITE_OUTPUT_INDEX_H__
namespace tflite
{
+/**
+ * @brief Class to define OutputIndex
+ */
class OutputIndex
{
public:
+ /**
+ * @brief Construct a OutputIndex object with index value
+ * @param[in] index The value of index
+ */
OutputIndex(int index) : _index(index)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get index value as int
+ * @return Index value as int
+ */
int asInt(void) const { return _index; }
private:
* limitations under the License.
*/
+/**
+ * @file Quantization.h
+ * @brief This file contains BitwiseIntToFloat union and quantization related
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_QUANTIZATION_H__
#define __NNFW_SUPPORT_TFLITE_QUANTIZATION_H__
+/**
+ * @brief Union to provide bitwise conversion of integer and float
+ */
union BitwiseIntToFloat {
int i;
float f;
#include "tensorflow/contrib/lite/context.h"
+/**
+ * @brief Get TfLiteQuantizationParams object with default values
+ * @return TfLiteQuantizationParams object
+ */
TfLiteQuantizationParams make_default_quantization(void);
#endif // __NNFW_SUPPORT_TFLITE_QUANTIZATION_H__
* limitations under the License.
*/
+/**
+ * @file Session.h
+ * @brief This file contains Session class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_SESSION_H__
#define __NNFW_SUPPORT_TFLITE_SESSION_H__
namespace tflite
{
+/**
+ * @brief Structure to provide interface methods of interpreter session
+ */
struct Session
{
+ /**
+ * @brief Destruct Session object using default destructor
+ */
virtual ~Session() = default;
+ /**
+ * @brief Get the Interpreter object pointer
+ * @return The Interpreter object pointer
+ */
virtual ::tflite::Interpreter *interp(void) = 0;
+ /**
+ * @brief Prepare the session
+ * @return @c true if prepare method succeeded, otherwise @c false
+ */
virtual bool prepare(void) = 0;
+ /**
+ * @brief Run the session
+ * @return @c true if run method succeeded, otherwise @c false
+ */
virtual bool run(void) = 0;
+ /**
+ * @brief Teardown(release) the session
+ * @return @c true if teardown method succeeded, otherwise @c false
+ */
virtual bool teardown(void) = 0;
};
* limitations under the License.
*/
+/**
+ * @file TensorLogger.h
+ * @brief This file contains TensorLogger class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_LOGGER_H__
#define __NNFW_SUPPORT_TFLITE_TENSOR_LOGGER_H__
namespace tflite
{
-/*
-This is a utility to write input and output value / shape into a file in python form.
-any python app can load this value by running the python code below:
-
- exec(open(filename).read())
-
-generated python code looks like the following:
-
-# ------------- test name -------------
-tensor_shape_gen = []
-tensor_value_gen = []
-
-tensor_shape_gen.append("{2, 1, 2}")
-tensor_value_gen.append([1, 2, 3, 4])
-
-tensor_shape_gen.append("{2}")
-tensor_value_gen.append([1, 2])
-
-tensor_shape_gen.append("{2, 1, 2}")
-tensor_value_gen.append([1, 4, 3, 8])
-# -----------------------------------------
-*/
-
+/**
+ * @brief Class to write input and output value / shape into a file in python form
+ * @note This is a utility to write input and output value / shape into a file in python form.\n
+ * any python app can load this value by running the python code below:\n
+ * exec(open(filename).read())\n
+ * generated python code looks like the following: \n
+ * tensor_shape_gen = []\n
+ * tensor_value_gen = []\n\n
+ * tensor_shape_gen.append("{2, 1, 2}")\n
+ * tensor_value_gen.append([1, 2, 3, 4])\n\n
+ * tensor_shape_gen.append("{2}")\n
+ * tensor_value_gen.append([1, 2])\n\n
+ * tensor_shape_gen.append("{2, 1, 2}")\n
+ * tensor_value_gen.append([1, 4, 3, 8])\n
+ */
class TensorLogger
{
private:
std::ofstream _outfile;
public:
-
+ /**
+ * @brief Get TensorLogger instance
+ * @return The TensorLogger instance
+ */
static TensorLogger &instance()
{
static TensorLogger instance;
return instance;
}
+ /**
+ * @brief Save the tensor details to file from interpreter
+ * @param[in] path The file path to save
+ * @param[in] interp The TfLite interpreter
+ */
void save(const std::string &path, ::tflite::Interpreter &interp)
{
open(path);
* limitations under the License.
*/
+/**
+ * @file TensorShapeUtils.h
+ * @brief This file contains utilities function of tensor shape
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_SHAPE_UTILS_H__
#define __NNFW_SUPPORT_TFLITE_TENSOR_SHAPE_UTILS_H__
namespace tflite
{
-// Converts tensor::Shape into a vector
+/**
+ * @brief Converts tensor::Shape into a vector
+ * @param[in] shape The tensor shape to be converted
+ * @return vector value of given shape object
+ */
static inline std::vector<int32_t> as_dims(const nnfw::util::tensor::Shape &shape)
{
std::vector<int32_t> dims;
return dims;
}
+/**
+ * @brief Broadcasts between two given shapes
+ * @param[in] lhs_shape The left hand side shape
+ * @param[in] rhs_shape The right hand side shape
+ * @return The broadcasted shape
+ */
nnfw::util::tensor::Shape broadcast(const nnfw::util::tensor::Shape &lhs_shape,
const nnfw::util::tensor::Shape &rhs_shape);
* limitations under the License.
*/
+/**
+ * @file TensorUtils.h
+ * @brief This file contains utilities function
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_UTILS_H__
#define __NNFW_SUPPORT_TFLITE_TENSOR_UTILS_H__
namespace tflite
{
+/**
+ * @brief Get @c true if tensor type is kTfLiteFloat32, otherwise @c false
+ * @param[in] tensor The tensor object to be compared
+ * @return @c true if tensor type is kTfLiteFloat32, otherwise @c false
+ */
inline bool isFloatTensor(const TfLiteTensor *tensor)
{
return tensor->type == kTfLiteFloat32;
}
+/**
+ * @brief Get @c true if tensor is 4-D tensor and the first dimension length is 1, otherwise @c false
+ * @param[in] tensor The tensor object to be compared
+ * @return @c true if tensor is 4-D tensor and the first dimension length is 1, otherwise @c false
+ */
inline bool isFeatureTensor(const TfLiteTensor *tensor)
{
return (tensor->dims->size == 4) && (tensor->dims->data[0] == 1);
* limitations under the License.
*/
+/**
+ * @file TensorView.h
+ * @brief This file contains TensorView class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
#define __NNFW_SUPPORT_TFLITE_TENSOR_VIEW_H__
namespace tflite
{
+/**
+ * @brief Class to define TensorView which is inherited from nnfw::util::tensor::Reader<T> class
+ */
template<typename T> class TensorView final : public nnfw::util::tensor::Reader<T>
{
public:
+ /**
+ * @brief Construct a TensorView object with base and shape information
+ * @param[in] shape The shape of a tensor
+ * @param[in] base The base address of a tensor
+ */
TensorView(const nnfw::util::tensor::Shape &shape, T *base)
: _shape{shape}, _base{base}
{
}
public:
+ /**
+ * @brief Get shape of tensor
+ * @return Reference of shape
+ */
const nnfw::util::tensor::Shape &shape(void) const { return _shape; }
public:
+ /**
+ * @brief Get value of tensor index
+ * @param[in] index The tensor index
+ * @return The value at the index
+ */
T at(const nnfw::util::tensor::Index &index) const override
{
const auto offset = _stride.offset(index);
}
public:
+ /**
+ * @brief Get reference value of tensor index
+ * @param[in] index The tensor index
+ * @return The reference value at the index
+ */
T &at(const nnfw::util::tensor::Index &index)
{
const auto offset = _stride.offset(index);
}
private:
- nnfw::util::tensor::Shape _shape;
+ nnfw::util::tensor::Shape _shape; /**< The tensor shape */
public:
- T *_base;
- nnfw::util::tensor::NonIncreasingStride _stride;
+ T *_base; /**< The base address of tensor */
+ nnfw::util::tensor::NonIncreasingStride _stride; /**< The NonIncreasingStride object */
public:
// TODO Introduce Operand ID class
+ /**
+ * @brief Create TensorView object using given parameters
+ * @param[in] interp The TfLite interpreter
+ * @param[in] tensor_index The tensor index
+ * @return The new TensorView<T> object
+ */
static TensorView<T> make(::tflite::Interpreter &interp, int tensor_index)
{
auto tensor_ptr = interp.tensor(tensor_index);
* limitations under the License.
*/
+/**
+ * @file Builder.h
+ * @brief This file contains Builder structure
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_INTERP_BUILDER_H__
#define __NNFW_SUPPORT_TFLITE_INTERP_BUILDER_H__
namespace interp
{
+/**
+ * @brief Structure to Builder
+ */
struct Builder
{
+ /**
+ * @brief Destroy the Builder object
+ */
virtual ~Builder() = default;
+ /**
+ * @brief Build a FlatBuffer model
+ * @return The TfLite interpreter object
+ */
virtual std::unique_ptr<::tflite::Interpreter> build(void) const = 0;
};
* limitations under the License.
*/
+/**
+ * @file FlatBufferBuilder.h
+ * @brief This file contains FlatBufferBuilder class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
#define __NNFW_SUPPORT_TFLITE_INTERP_FLAT_BUFFER_BUILDER_H__
namespace interp
{
+/**
+ * @brief Class to define FlatBufferBuilder which is inherited from Builder
+ */
class FlatBufferBuilder final : public Builder
{
public:
+ /**
+ * @brief Construct a FlatBufferBuilder object with FlatBufferModel of TfLite
+ * @param[in] model The TfLite Flatbuffer model
+ */
FlatBufferBuilder(const ::tflite::FlatBufferModel &model) : _model{model}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Build a FlatBuffer model
+ * @return The TfLite interpreter pointer address
+ */
std::unique_ptr<::tflite::Interpreter> build(void) const override;
private:
* limitations under the License.
*/
+/**
+ * @file FunctionBuilder.h
+ * @brief This file contains FunctionBuilder class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_INTERP_FUNCTION_BUILDER_H__
#define __NNFW_SUPPORT_TFLITE_INTERP_FUNCTION_BUILDER_H__
namespace interp
{
+/**
+ * @brief Class to define FunctionBuilder which is inherited from Builder
+ */
class FunctionBuilder final : public Builder
{
public:
using SetupFunc = std::function<void (::tflite::Interpreter &)>;
public:
+ /**
+ * @brief Construct a FunctionBuilder object with SetupFunction
+ * @param[in] fn The SetupFunc object
+ */
FunctionBuilder(const SetupFunc &fn) : _fn{fn}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Build a SetupFunc
+ * @return The TfLite interpreter pointer address
+ */
std::unique_ptr<::tflite::Interpreter> build(void) const override;
private:
* limitations under the License.
*/
+/**
+ * @file CustomOps.h
+ * @brief This file contains registration of custom operands
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_CUSTOM_OP_H__
#define __NNFW_SUPPORT_TFLITE_KERNELS_CUSTOM_OP_H__
* limitations under the License.
*/
+/**
+ * @file RSQRT.h
+ * @brief This file contains RSQRT namespace and RSQRT function definitions
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_RSQRT_H__
#define __NNFW_SUPPORT_TFLITE_KERNELS_RSQRT_H__
{
namespace RSQRT
{
-
+ /**
+ * @brief Initialize RSQRT operand using the contents of buffer
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @param[in] length The buffer length
+ * @return The void pointer for user data
+ */
void *InitRSQRT(TfLiteContext *context, const char *buffer, size_t length);
+
+ /**
+ * @brief Release any memory it might have allocated via 'InitRSQRT'
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @return N/A
+ */
void FreeRSQRT(TfLiteContext *context, void *buffer);
+
+ /**
+ * @brief Prepare the RSQRT operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
TfLiteStatus PrepareRSQRT(TfLiteContext *context, TfLiteNode *node);
+
+ /**
+ * @brief Evaluate the RSQRT operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
TfLiteStatus EvalRSQRT(TfLiteContext *context, TfLiteNode *node);
} // namespace RSQRT
* limitations under the License.
*/
+/**
+ * @file SquaredDifference.h
+ * @brief This file contains SquaredDifference namespace and SquaredDifference function definitions
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_SQUARED_DIFFERENCE_H__
#define __NNFW_SUPPORT_TFLITE_KERNELS_SQUARED_DIFFERENCE_H__
namespace SquaredDifference
{
+ /**
+ * @brief Initialize SquaredDifference operand using the contents of buffer
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @param[in] length The buffer length
+ * @return The void pointer for user data
+ */
void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length);
+
+ /**
+ * @brief Release any memory it might have allocated via 'InitSquaredDifference'
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @return N/A
+ */
void FreeSquaredDifference(TfLiteContext *context, void *buffer);
+
+ /**
+ * @brief Prepare the SquaredDifference operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node);
+
+ /**
+ * @brief Evaluate the SquaredDifference operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node);
} // namespace SquaredDifference
* limitations under the License.
*/
+/**
+ * @file TensorFlowMax.h
+ * @brief This file contains TensorFlowMax namespace and TensorFlowMax function definitions
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_TENSORFLOW_MAX_H__
#define __NNFW_SUPPORT_TFLITE_KERNELS_TENSORFLOW_MAX_H__
namespace TensorFlowMax
{
+ /**
+ * @brief Initialize TensorFlowMax operand using the contents of buffer
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @param[in] length The buffer length
+ * @return The void pointer for user data
+ */
void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length);
+
+ /**
+ * @brief Release any memory it might have allocated via 'InitTensorFlowMax'
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @return N/A
+ */
void FreeTensorFlowMax(TfLiteContext *context, void *buffer);
+
+ /**
+ * @brief Prepare the TensorFlowMax operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
+
+ /**
+ * @brief Evaluate the TensorFlowMax operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
} // namespace TensorFlowMax
* limitations under the License.
*/
+/**
+ * @file EnvVar.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::EnvVar class
+ */
+
#ifndef __NNFW_UTIL_ENV_VAR__
#define __NNFW_UTIL_ENV_VAR__
{
namespace util
{
-
+/**
+ * @brief Class to access environment variable
+ */
class EnvVar
{
public:
+ /**
+ * @brief Construct a new EnvVar object
+ * @param[in] key environment variable
+ */
EnvVar(const std::string &key)
{
const char *value = std::getenv(key.c_str());
}
}
+ /**
+ * @brief Get environment variable of string type
+ * @param[in] def Default value of environment variable
+ * @return Default value passed as a parameter when there is no environment variable,
+ * otherwise the value of environment variable passed into constructor
+ */
std::string asString(const std::string &def) const
{
if (_value.empty())
return _value;
}
+ /**
+ * @brief Get environment variable of boolean type
+ * @param[in] def Default value of environment variable
+ * @return Default value passed as a parameter when there is no environment variable,
+ * otherwise the value of environment variable passed into constructor
+ */
bool asBool(bool def) const
{
if (_value.empty())
return (false_found == false_list.end());
}
+ /**
+ * @brief Get environment variable of int type
+ * @param[in] def Default value of environment variable
+ * @return Default value passed as a parameter when there is no environment variable,
+ * otherwise the value of environment variable passed into constructor
+ */
int asInt(int def) const
{
if (_value.empty())
* limitations under the License.
*/
+/**
+ * @file benchmark.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::benchmark::Accumulator class
+ */
#ifndef __NNFW_UTIL_BENCHMARK_H__
#define __NNFW_UTIL_BENCHMARK_H__
namespace benchmark
{
+/**
+ * @brief Class to accumulate time during benchmark
+ */
template <typename T> class Accumulator
{
public:
+ /**
+ * @brief Construct a new Accumulator object
+ * @param[in] ref Object to keep time duration
+ */
Accumulator(T &ref) : _ref(ref)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Return the reference of @c ref passed to constructor
+ * @return Reference of @c ref
+ */
T &operator()(void) { return _ref; }
private:
T &_ref;
};
+/**
+ * @brief Run passed function and returns accumulated time
+ * @tparam T Period used by @c std::chrono::duration_cast
+ * @tparam Callable Function type to benchmark
+ * @param[in] acc Accumulated time after running @c cb
+ * @param[in] cb Function to run and benchmark
+ * @return Accumulated time
+ */
template <typename T, typename Callable>
Accumulator<T> &operator<<(Accumulator<T> &&acc, Callable cb)
{
* limitations under the License.
*/
+/**
+ * @file environment.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains utility functions and classes to access environment variables
+ */
+
#ifndef __UTIL_ENVIRONMENT_H__
#define __UTIL_ENVIRONMENT_H__
namespace util
{
+/**
+ * @brief Get the environment variable of int type
+ * @param[in] name Name of the environment variable
+ * @param[in] defaultValue Default value when the value of environment variable does not exist
+ * @return The int value of the environment variable
+ */
int get_env_int(const char *name, int defaultValue = 0);
+
+/**
+ * @brief Get the environment variable of bool type
+ * @param[in] name Name of the environment variable
+ * @param[in] defaultValue Default value when the value of environment variable does not exist
+ * @return @c 0 if the value of the environment variable is @c "0", @c 1 in case of other number
+ */
bool get_env_bool(const char *name, bool defaultValue = false);
}
}
{
namespace env
{
-
+/**
+ * @brief Parent struct of @ref IntAccessor and @ref FloatAccessor
+ * @tparam T Type of the value of environment variable
+ */
template <typename T> struct Accessor
{
+ /**
+ * @brief Destroy the Accessor object
+ */
virtual ~Accessor() = default;
-
+ /**
+ * @brief Read the value of environment variable
+ * @param[out] out The value of environment variable
+ * @return @c true if accessing environment variable is successful,
+ *         @c false if no such environment variable exists
+ */
virtual bool access(T &out) const = 0;
};
+/**
+ * @brief Class to read int environment variable
+ */
class IntAccessor : public Accessor<int>
{
public:
+ /**
+ * @brief Construct a new IntAccessor object
+ * @param[in] tag Name of environment variable
+ */
IntAccessor(const std::string &tag);
public:
+ /**
+ * @brief Read the value of environment variable
+ * @param[out] out The value of environment variable
+ * @return @c true if accessing environment variable is successful,
+ *         @c false if no such environment variable exists
+ */
bool access(int &out) const override;
private:
std::string _tag;
};
+/**
+ * @brief Class to read float environment variable
+ */
class FloatAccessor : public Accessor<float>
{
public:
+ /**
+ * @brief Construct a new FloatAccessor object
+ * @param[in] tag Name of environment variable
+ */
FloatAccessor(const std::string &tag);
public:
+ /**
+ * @brief Read the value of environment variable
+ * @param[out] out The value of environment variable
+ * @return @c true if accessing environment variable is successful,
+ *         @c false if no such environment variable exists
+ */
bool access(float &out) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Index.h
+ * @brief This file contains Index class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_FEATURE_INDEX_H__
#define __NNFW_UTIL_FEATURE_INDEX_H__
namespace feature
{
+/**
+ * @brief Class to have the index information for calculating the offset.
+ */
class Index
{
public:
+ /**
+ * @brief Construct Index object using default constructor
+ */
Index() = default;
public:
+ /**
+ * @brief Construct Index object with three indexes of dimensions
+ * @param[in] ch The depth index
+ * @param[in] row The height index
+ * @param[in] col The width index
+ */
Index(int32_t ch, int32_t row, int32_t col) : _batch{1}, _ch{ch}, _row{row}, _col{col}
{
// DO NOTHING
}
+ /**
+ * @brief Construct Index object with four indexes of dimensions
+ * @param[in] batch The batch index
+ * @param[in] ch The depth index
+ * @param[in] row The height index
+ * @param[in] col The width index
+ */
Index(int32_t batch, int32_t ch, int32_t row, int32_t col) : _batch{batch}, _ch{ch}, _row{row}, _col{col}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get the batch index
+ * @return The batch index
+ */
int32_t batch(void) const { return _batch; }
+ /**
+ * @brief Get the depth index
+ * @return The depth index
+ */
int32_t ch(void) const { return _ch; }
+ /**
+ * @brief Get the height index
+ * @return The height index
+ */
int32_t row(void) const { return _row; }
+ /**
+ * @brief Get the width index
+ * @return The width index
+ */
int32_t col(void) const { return _col; }
public:
+ /**
+ * @brief Get the batch index as the lvalue reference
+ * @return The reference of the batch value
+ */
int32_t &batch(void) { return _batch; }
+ /**
+ * @brief Get the depth index as the lvalue reference
+ * @return The reference of the depth value
+ */
int32_t &ch(void) { return _ch; }
+ /**
+ * @brief Get the height index as the lvalue reference
+ * @return The reference of the height value
+ */
int32_t &row(void) { return _row; }
+ /**
+ * @brief Get the width index as the lvalue reference
+ * @return The reference of the width value
+ */
int32_t &col(void) { return _col; }
private:
+ /**
+ * @brief The batch index
+ */
int32_t _batch;
+ /**
+ * @brief The depth index
+ */
int32_t _ch;
+ /**
+ * @brief The height index
+ */
int32_t _row;
+ /**
+ * @brief The width index
+ */
int32_t _col;
};
* limitations under the License.
*/
+/**
+ * @file IndexIterator.h
+ * @brief This file contains IndexIterator class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_FEATURE_INDEX_ITERATOR_H__
#define __NNFW_UTIL_FEATURE_INDEX_ITERATOR_H__
namespace feature
{
+/**
+ * @brief Class to iterate Callable with Index of feature
+ */
class IndexIterator
{
public:
+ /**
+ * @brief Construct IndexIterator object with Shape of feature
+ * @param[in] shape Shape reference of feature
+ */
IndexIterator(const Shape &shape) : _shape{shape}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Call a function iterated
+ * @param[in] cb A callback function
+ * @return Current IndexIterator object
+ */
template <typename Callable> IndexIterator &iter(Callable cb)
{
for (int32_t batch = 0; batch < _shape.N; ++batch)
}
private:
+ /**
+ * @brief Shape for feature
+ */
const Shape _shape;
};
+/**
+ * @brief Create an object of IndexIterator for feature
+ * @param[in] shape Shape reference of feature
+ * @return Created IndexIterator object
+ */
static inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; }
+/**
+ * @brief Call a function iterated using IndexIterator of feature
+ * Overloaded operator<<
+ * @param[in] it An IndexIterator reference
+ * @param[in] cb A callback function
+ * @return created IndexIterator object
+ */
template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb)
{
return it.iter(cb);
* limitations under the License.
*/
+/**
+ * @file Object.h
+ * @brief This file contains Object class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_FEATURE_OBJECT_H__
#define __NNFW_UTIL_FEATURE_OBJECT_H__
namespace feature
{
+/**
+ * @brief Class to have information of the operand for feature
+ */
template <typename T> class Object final : public Reader<T>
{
public:
using Generator = std::function<T(const Shape &shape, const Index &index)>;
public:
+ /**
+ * @brief Construct Object object with Shape of feature and set value used by Generator
+ * @param[in] shape Reference of Shape for feature
+ * @param[in] fn A function to set values of operand tensor
+ */
Object(const Shape &shape, const Generator &fn) : _shape{shape}
{
_value.resize(_shape.C * _shape.H * _shape.W);
}
public:
+ /**
+ * @brief Get Shape of feature as the reference
+ * @return The reference of the Shape of this feature
+ */
const Shape &shape(void) const { return _shape; }
public:
+ /**
+ * @brief Get the value used by three indexes
+ * @param[in] ch The depth index
+ * @param[in] row The height index
+ * @param[in] col The width index
+ * @return The value at the offset
+ */
T at(uint32_t ch, uint32_t row, uint32_t col) const override
{
return _value.at(offsetOf(ch, row, col));
}
private:
+ /**
+ * @brief Get the offset value at three indexes
+ * @param[in] ch The depth index
+ * @param[in] row The height index
+ * @param[in] col The width index
+ * @return The offset value
+ */
uint32_t offsetOf(uint32_t ch, uint32_t row, uint32_t col) const
{
return ch * _shape.H * _shape.W + row * _shape.W + col;
}
private:
+ /**
+ * @brief Shape of operand
+ */
Shape _shape;
+ /**
+ * @brief The tensor vector of operand
+ */
std::vector<T> _value;
};
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @brief This file contains Reader class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_FEATURE_READER_H__
#define __NNFW_UTIL_FEATURE_READER_H__
namespace feature
{
+/**
+ * @brief Class reads values of feature
+ * The interface class
+ */
template <typename T> struct Reader
{
+ /**
+ * @brief Destruct Reader object using default destructor
+ */
virtual ~Reader() = default;
+ /**
+ * @brief Get the value used by three indexes
+ * @param[in] ch The depth index
+ * @param[in] row The height index
+ * @param[in] col The width index
+ * @return The value at the offset
+ */
virtual T at(uint32_t ch, uint32_t row, uint32_t col) const = 0;
+ /**
+ * @brief Get the value used by four indexes
+ * @param[in] batch The batch index
+ * @param[in] ch The depth index
+ * @param[in] row The height index
+ * @param[in] col The width index
+ * @return The value at the offset
+ */
virtual T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const = 0;
};
* limitations under the License.
*/
+/**
+ * @file Shape.h
+ * @brief This file contains Shape class for feature
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_FEATURE_SHAPE_H__
#define __NNFW_UTIL_FEATURE_SHAPE_H__
namespace feature
{
+/**
+ * @brief Structure to have values of dimensions for feature
+ */
struct Shape
{
- int32_t N; // Batch
- int32_t C; // Depth
- int32_t H; // Height
- int32_t W; // Width
+ int32_t N; /**< The batch value */
+ int32_t C; /**< The depth value */
+ int32_t H; /**< The height value */
+ int32_t W; /**< The width value */
+ /**
+ * @brief Construct Shape object using default constructor
+ */
Shape() = default;
+ /**
+ * @brief Construct Shape object with three values of dimensions
+ * @param[in] depth The depth value
+ * @param[in] height The height value
+ * @param[in] width The width value
+ */
Shape(int32_t depth, int32_t height, int32_t width) : N{1}, C{depth}, H{height}, W{width}
{
// DO NOTHING
}
+ /**
+ * @brief Construct Shape object with four values of dimensions
+ * @param[in] batch The batch value
+ * @param[in] depth The depth value
+ * @param[in] height The height value
+ * @param[in] width The width value
+ */
Shape(int32_t batch, int32_t depth, int32_t height, int32_t width) : N{batch}, C{depth}, H{height}, W{width}
{
// DO NOTHING
* limitations under the License.
*/
+/**
+ * @file TextFormatter.h
+ * @brief This file contains TextFormatter class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_FEATURE_TEXT_FORMATTER_H__
#define __NNFW_UTIL_FEATURE_TEXT_FORMATTER_H__
namespace feature
{
+/**
+ * @brief Class to print operand of feature to ostream in the given string format
+ */
template <typename T> class TextFormatter
{
public:
+ /**
+ * @brief Construct TextFormatter object with an operand's information.
+ * @param[in] shape The shape of an operand
+ * @param[in] data The data of an operand
+ */
TextFormatter(const Shape &shape, const Reader<T> &data) : _shape(shape), _data(data)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get Shape of feature as the lvalue reference
+ * @return Shape of feature
+ */
const Shape &shape(void) const { return _shape; }
+ /**
+ * @brief Get Reader<T> that can read the data of an operand
+ * @return Reader<T>
+ */
const Reader<T> &data(void) const { return _data; }
private:
+ /**
+ * @brief Shape of feature
+ */
const Shape &_shape;
+ /**
+ * @brief Reader<T> that can read the data of an operand
+ */
const Reader<T> &_data;
};
+/**
+ * @brief Print operand of feature
+ * @param[in] os Standard output stream
+ * @param[in] fmt TextFormatter to print information of an operand
+ * @return Standard output stream
+ */
template <typename T> std::ostream &operator<<(std::ostream &os, const TextFormatter<T> &fmt)
{
const auto &shape = fmt.shape();
* limitations under the License.
*/
+/**
+ * @file fp32.h
+ * @brief This file contains functions to compare float values
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_FP32_H__
#define __NNFW_UTIL_FP32_H__
namespace fp32
{
+/**
+ * @brief Get the difference between two float values as a relative value.
+ * @param[in] lhs A float value to be compared
+ * @param[in] rhs A float value to be compared
+ * @return A relative value of difference between two float values.
+ */
inline float relative_diff(float lhs, float rhs)
{
const auto diff = std::fabs(lhs - rhs);
return diff / base;
}
+/**
+ * @brief Verify that an obtained float value is equal to the expected float value by using FLT_EPSILON
+ * @param[in] expected An expected float value to be compared
+ * @param[in] obtained An obtained float value to be compared
+ * @param[in] tolerance A tolerance value
+ * @return @c true if both values are equal, otherwise @c false
+ */
inline bool epsilon_equal(float expected, float obtained, uint32_t tolerance = 1)
{
if (std::isnan(expected) && std::isnan(obtained))
return diff <= (max * FLT_EPSILON * tolerance);
}
+/**
+ * @brief Verify that an obtained float value is equal to the expected float value by comparing absolute tolerance value
+ * @param[in] expected An expected float value to be compared
+ * @param[in] obtained An obtained float value to be compared
+ * @param[in] tolerance A tolerance value
+ * @return @c true if both values are equal, otherwise @c false
+ */
inline bool absolute_epsilon_equal(float expected, float obtained, float tolerance = 0.001)
{
if (std::isnan(expected) && std::isnan(obtained))
* limitations under the License.
*/
+/**
+ * @file IndexIterator.h
+ * @brief This file contains IndexIterator class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_KERNEL_INDEX_ITERATOR_H__
#define __NNFW_UTIL_KERNEL_INDEX_ITERATOR_H__
namespace kernel
{
+/**
+ * @brief Class to iterate Callable with Index of kernel
+ */
class IndexIterator
{
public:
+ /**
+ * @brief Construct IndexIterator object with Shape of kernel
+ * @param[in] shape Shape reference of kernel
+ */
IndexIterator(const Shape &shape) : _shape{shape}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Call a function iterated
+ * @param[in] cb A callback function
+ * @return Current IndexIterator object
+ */
template <typename Callable> IndexIterator &iter(Callable cb)
{
for (int32_t nth = 0; nth < _shape.N; ++nth)
}
private:
- const Shape _shape;
+ const Shape _shape; /**< Shape for kernel */
};
+/**
+ * @brief Create an object of IndexIterator for kernel
+ * @param[in] shape Shape reference of kernel
+ * @return Created IndexIterator object
+ */
inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; }
+/**
+ * @brief Call a function iterated using IndexIterator of kernel
+ * Overloaded operator<<
+ * @param[in] it An IndexIterator reference
+ * @param[in] cb A callback function
+ * @return Created IndexIterator object
+ */
template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb)
{
return it.iter(cb);
* limitations under the License.
*/
+/**
+ * @file RandomObject.h
+ * @brief This file contains RandomObject class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_KERNEL_RANDOM_OBJECT_H__
#define __NNFW_UTIL_KERNEL_RANDOM_OBJECT_H__
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @brief This file contains Reader structure
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_KERNEL_READER_H__
#define __NNFW_UTIL_KERNEL_READER_H__
namespace kernel
{
+/**
+ * @brief Structure to Reader
+ */
template <typename T> struct Reader
{
+ /**
+ * @brief Destroy the Reader object as default
+ */
virtual ~Reader() = default;
+ /**
+ * @brief Get the value used by four indexes
+ * @param[in] nth The kernel index
+ * @param[in] ch The channel index
+ * @param[in] row The row index
+ * @param[in] col The column index
+ * @return The value at the offset
+ */
virtual T at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const = 0;
};
* limitations under the License.
*/
+/**
+ * @file Shape.h
+ * @brief This file contains Shape structure
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_KERNEL_SHAPE_H__
#define __NNFW_UTIL_KERNEL_SHAPE_H__
namespace kernel
{
+/**
+ * @brief Structure to Shape
+ */
struct Shape
{
- int32_t N;
- int32_t C;
- int32_t H;
- int32_t W;
+ int32_t N; /**< The kernel index */
+ int32_t C; /**< The channel index */
+ int32_t H; /**< The height index */
+ int32_t W; /**< The width index */
+ /**
+ * @brief Construct a new Shape object as default
+ */
Shape() = default;
+
+ /**
+ * @brief Construct a new Shape object with parameters
+ * @param[in] count The kernel index
+ * @param[in] depth The channel index
+ * @param[in] height The height index
+ * @param[in] width The width index
+ */
Shape(int32_t count, int32_t depth, int32_t height, int32_t width)
: N{count}, C{depth}, H{height}, W{width}
{
* limitations under the License.
*/
+/**
+ * @file IndexIterator.h
+ * @brief This file contains IndexIterator class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_MATRIX_INDEX_ITERATOR_H__
#define __NNFW_UTIL_MATRIX_INDEX_ITERATOR_H__
namespace matrix
{
+/**
+ * @brief Class to iterate Callable with Index of matrix
+ */
class IndexIterator
{
public:
+ /**
+ * @brief Construct IndexIterator object with Shape of matrix
+ * @param[in] shape Shape reference of matrix
+ */
IndexIterator(const Shape &shape) : _shape{shape}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Call a function iterated
+ * @param[in] cb A callback function
+ * @return Current IndexIterator object
+ */
template <typename Callable> IndexIterator &iter(Callable cb)
{
for (uint32_t row = 0; row < _shape.H; ++row)
}
private:
+ /**
+ * @brief Shape for matrix
+ */
const Shape _shape;
};
+/**
+ * @brief Create an object of IndexIterator for matrix
+ * @param[in] shape Shape reference of matrix
+ * @return Created IndexIterator object
+ */
inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; }
+/**
+ * @brief Call a function iterated using IndexIterator of matrix
+ * Overloaded operator<<
+ * @param[in] it An IndexIterator reference
+ * @param[in] cb A callback function
+ * @return created IndexIterator object
+ */
template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb)
{
return it.iter(cb);
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @brief This file contains Reader class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_MATRIX_READER_H__
#define __NNFW_UTIL_MATRIX_READER_H__
namespace matrix
{
+/**
+ * @brief Class reads values of matrix
+ * The interface class
+ */
template <typename T> struct Reader
{
+ /**
+ * @brief Destruct Reader object using default destructor
+ */
virtual ~Reader() = default;
+ /**
+ * @brief Get the value used by two indexes
+ * @param[in] row The height index
+ * @param[in] col The width index
+ * @return The value at the offset
+ */
virtual T at(uint32_t row, uint32_t col) const = 0;
};
* limitations under the License.
*/
-// for 2D tensor
+/**
+ * @file Shape.h
+ * @brief This file contains Shape class for matrix
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_MATRIX_SHAPE_H__
#define __NNFW_UTIL_MATRIX_SHAPE_H__
namespace matrix
{
+/**
+ * @brief Structure to have values of dimensions for matrix
+ */
struct Shape
{
- int32_t H; // Height
- int32_t W; // Width
+ int32_t H; /**< The height value */
+ int32_t W; /**< The width value */
+ /**
+ * @brief Construct Shape object using default constructor
+ */
Shape() = default;
+
+ /**
+ * @brief Construct Shape object with two values of dimensions
+ * @param[in] height The height value
+ * @param[in] width The width value
+ */
Shape(int32_t height, int32_t width) : H{height}, W{width}
{
// DO NOTHING
}
};
-} // namespace feature
+} // namespace matrix
} // namespace util
} // namespace nnfw
* limitations under the License.
*/
+/**
+ * @file Comparator.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::Comparator class
+ */
+
#ifndef __NNFW_UTIL_TENSOR_COMPARATOR_H__
#define __NNFW_UTIL_TENSOR_COMPARATOR_H__
namespace tensor
{
+/**
+ * @brief Class to compare two tensors (expected and obtained to compare)
+ */
class Comparator
{
public:
+ /**
+ * @brief Construct a new @c Comparator object
+ * @param[in] fn Function that compares two float values
+ */
Comparator(const std::function<bool (float lhs, float rhs)> &fn) : _compare_fn{fn}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Struct to observe comparison results
+ */
struct Observer
{
+ /**
+ * @brief Get notification of comparison result at every index of two tensors
+ * @param[in] index Index of tensors compared
+ * @param[in] expected Expected value of element at @c index
+ * @param[in] obtained Obtained value of element at @c index
+ * @return N/A
+ */
virtual void notify(const Index &index, float expected, float obtained) = 0;
};
public:
+ /**
+ * @brief Compare two tensors
+ * @param[in] shape Shape of two tensors
+ * @param[in] expected @c Reader<float> object that accesses expected tensor
+ * @param[in] obtained @c Reader<float> object that accesses obtained tensor
+ * @param[in] observer @c Observer notified of expected value and obtained value at every index
+ * @return @c std::vector<Diff<float>> containing information of failed comparison
+ */
// NOTE Observer should live longer than comparator
std::vector<Diff<float>> compare(const Shape &shape,
const Reader<float> &expected,
* limitations under the License.
*/
+/**
+ * @file Diff.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::Diff struct
+ */
+
#ifndef __NNFW_UTIL_TENSOR_DIFF_H__
#define __NNFW_UTIL_TENSOR_DIFF_H__
namespace tensor
{
+/**
+ * @brief Struct to have information after comparing two elements of two tensors
+ */
template<typename T> struct Diff
{
- Index index;
+ Index index; /**< Index of elements in two tensors, which turn out to be different */
- T expected;
- T obtained;
+ T expected; /**< Expected value of element of first tensor */
+ T obtained; /**< Obtained value of element of second tensor */
+ /**
+ * @brief Construct a new @c Diff object
+ * @param[in] i Initial value of index
+ */
Diff(const Index &i) : index(i)
{
// DO NOTHING
}
+ /**
+ * @brief Construct a new @c Diff object
+ * @param[in] i Index value
+ * @param[in] e Expected value of element of first tensor
+ * @param[in] o Obtained value of element of second tensor
+ */
Diff(const Index &i, const T &e, const T &o) : index(i), expected{e}, obtained{o}
{
// DO NOTHING
* limitations under the License.
*/
+/**
+ * @file Index.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::Index struct
+ */
#ifndef __NNFW_UTIL_TENSOR_INDEX_H__
#define __NNFW_UTIL_TENSOR_INDEX_H__
namespace tensor
{
+/**
+ * @brief Struct to represent index of each dimension of a tensor
+ */
struct Index
{
public:
+ /**
+ * @brief Construct a new @c Index object
+ * @param[in] rank Rank of a tensor
+ */
Index(size_t rank) { _offsets.resize(rank); }
public:
+ /**
+ * @brief Construct a new @c Index object
+ * @param[in] offsets Rank of a tensor of @c std::initializer_list<int32_t> type
+ */
Index(std::initializer_list<int32_t> offsets) : _offsets{offsets}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get the rank
+ * @return Rank that this @c Index object can handle
+ */
size_t rank(void) const { return _offsets.size(); }
public:
+ /**
+ * @brief Get the index n'th dimension
+ * @param[in] n Dimension
+ * @return index of n'th dimension
+ */
int32_t at(size_t n) const { return _offsets.at(n); }
+
+ /**
+ * @brief Get the reference of the index n'th dimension
+ * @param[in] n Dimension
+ * @return reference of index of n'th dimension
+ */
int32_t &at(size_t n) { return _offsets.at(n); }
private:
std::vector<int32_t> _offsets;
};
-// This is used to convert NNAPI tensor index to ARM tensor index or vice versa
+/**
+ * @brief Copy an @c Index with reversed order
+ * @param[in] origin @c Index object to copy
+ * @return an @c Index object with reversed order
+ * @note This is used to convert NNAPI tensor index to ARM tensor index or vice versa
+ */
inline static Index copy_reverse(const Index &origin)
{
size_t rank = origin.rank();
* limitations under the License.
*/
+/**
+ * @file IndexEnumerator.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::IndexEnumerator class
+ */
+
#ifndef __NNFW_UTIL_TENSOR_INDEX_ENUMERATOR_H__
#define __NNFW_UTIL_TENSOR_INDEX_ENUMERATOR_H__
{
namespace tensor
{
-
+/**
+ * @brief Class to enumerate index of a tensor
+ *
+ */
class IndexEnumerator
{
public:
+ /**
+ * @brief Construct a new @c IndexEnumerator object
+ * @param[in] shape Shape of tensor of which index will be enumerate
+ */
explicit IndexEnumerator(const Shape &shape) : _shape(shape), _index(shape.rank()), _cursor(0)
{
const size_t rank = _shape.rank();
}
public:
+ /**
+ * @brief Prevent constructing @c IndexEnumerator object by using R-value reference
+ */
IndexEnumerator(IndexEnumerator &&) = delete;
+ /**
+ * @brief Prevent copy constructor
+ */
IndexEnumerator(const IndexEnumerator &) = delete;
public:
+ /**
+ * @brief Check if more enumeration is available
+ * @return @c true if more @c advance() is available, otherwise @c false
+ */
bool valid(void) const { return _cursor < _shape.rank(); }
public:
+ /**
+ * @brief Get the current index to enumerate
+ * @return Current index
+ */
const Index &curr(void) const { return _index; }
public:
+ /**
+ * @brief Advance index by +1
+ */
void advance(void)
{
const size_t rank = _shape.rank();
}
public:
- const Shape _shape;
+ const Shape _shape; //!< Shape to enumerate
private:
size_t _cursor;
* limitations under the License.
*/
+/**
+ * @file IndexFormatter.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::IndexFormatter class
+ */
+
#ifndef __NNFW_UTIL_TENSOR_INDEX_FORMATTER_H__
#define __NNFW_UTIL_TENSOR_INDEX_FORMATTER_H__
namespace tensor
{
+/**
+ * @brief Class to send @c Index object to output stream
+ */
class IndexFormatter
{
public:
+ /**
+ * @brief Construct a new @c IndexFormatter object
+ * @param[in] index index to be sent to output stream
+ */
IndexFormatter(const nnfw::util::tensor::Index &index) : _index(index)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get an @c Index object
+ * @return @c Index object previously passed to the constructor
+ */
const nnfw::util::tensor::Index &index(void) const { return _index; }
private:
const nnfw::util::tensor::Index &_index;
};
+/**
+ * @brief Send @c IndexFormatter object to output stream
+ * @param[in] os Output stream
+ * @param[in] fmt @c IndexFormatter object that is sent to output stream
+ * @return Output stream
+ */
std::ostream &operator<<(std::ostream &os, const IndexFormatter &fmt);
} // namespace tensor
* limitations under the License.
*/
+/**
+ * @file IndexIterator.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::IndexIterator class and
+ * helper function and operator
+ */
#ifndef __NNFW_UTIL_TENSOR_INDEX_ITERATOR_H__
#define __NNFW_UTIL_TENSOR_INDEX_ITERATOR_H__
namespace tensor
{
+/**
+ * @brief Class to iterate indexes available for given shape
+ */
class IndexIterator
{
public:
+ /**
+ * @brief Construct a new @c IndexIterator object
+ * @param[in] shape Shape of tensor of which index will be iterated
+ */
IndexIterator(const Shape &shape) : _shape(shape)
{
// DO NOTHING
}
public:
- // Allow move, but disallow copy
+ /**
+ * @brief Construct a new IndexIterator object using reference
+ * @param[in] IndexIterator @c IndexIterator object to move
+ */
IndexIterator(IndexIterator &&) = default;
+
+ /**
+ * @brief Prevent copy constructor
+ */
IndexIterator(const IndexIterator &) = delete;
public:
+ /**
+ * @brief Iterate all available indexes and run a function for each index
+ * @param[in] fn Function that requires an index as a parameter.
+ * @return @c IndexIterator object
+ */
template <typename Callable> IndexIterator &iter(Callable fn)
{
for (IndexEnumerator e{_shape}; e.valid(); e.advance())
const Shape &_shape;
};
+/**
+ * @brief Get an @c IndexIterator object
+ * @param[in] shape Shape of tensor of which index will be iterated
+ * @return @c IndexIterator object
+ */
inline IndexIterator iterate(const Shape &shape) { return IndexIterator{shape}; }
+/**
+ * @brief Iterate all indexes and apply a function
+ * @param[in] it @c IndexIterator object that is constructed with a tensor shape
+ * @param[in] cb A function that will receive a specific index.
+ * Inside the function, the index is used to manipulate tensor element.
+ * @return @c IndexIterator object
+ */
template <typename Callable> IndexIterator &operator<<(IndexIterator &&it, Callable cb)
{
return it.iter(cb);
* limitations under the License.
*/
+/**
+ * @file NonIncreasingStride.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::NonIncreasingStride class
+ */
#ifndef __NNFW_UTIL_TENSOR_NON_INCREASING_STRIDE_H__
#define __NNFW_UTIL_TENSOR_NON_INCREASING_STRIDE_H__
namespace tensor
{
-// As its name suggests, stride[N-1] >= stride[N] holds for all N < rank in NonIncreasingStride.
+/**
+ * @brief Class to represent strides where stride[N-1] >= stride[N] holds for all N < rank
+ */
class NonIncreasingStride
{
public:
+ /**
+ * @brief Initialize the stride data using @c Shape
+ * @param[in] shape to build stride info
+ * @return N/A
+ */
void init(const Shape &shape)
{
_stride.resize(shape.rank());
}
public:
+ /**
+   * @brief Get a stride value for a specific axis
+ * @param[in] axis Axis of stride
+ * @return The value of stride
+ */
uint32_t at(uint32_t axis) const { return _stride.at(axis); }
public:
+ /**
+ * @brief Get the 1-D offset of specified index for n-D tensor
+ * @param index @c Index object
+ * @return 1-D offset of index
+ */
uint32_t offset(const Index &index) const;
private:
* limitations under the License.
*/
+/**
+ * @file Object.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::Object class
+ */
+
#ifndef __NNFW_UTIL_TENSOR_OBJECT_H__
#define __NNFW_UTIL_TENSOR_OBJECT_H__
namespace tensor
{
+/**
+ * @brief Class to build a tensor using specific generator
+ * @tparam T Type of tensor element
+ */
+
template <typename T> class Object final : public Reader<T>
{
public:
+ /**
+ * @brief Function to generate tensor element
+ */
using Generator = std::function<T(const Shape &shape, const Index &index)>;
public:
+ /**
+ * @brief Construct a new @c Object object
+ * @param[in] shape Tensor shape
+ * @param[in] fn Function to generate tensor elements
+ */
Object(const Shape &shape, const Generator &fn) : _shape{shape}
{
// Set 'stride'
}
public:
+ /**
+ * @brief Get reference of shape
+ * @return Reference of shape
+ */
const Shape &shape(void) const { return _shape; }
public:
+ /**
+   * @brief Get an element of tensor
+ * @param[in] index Index of a tensor element
+ * @return Value of tensor element
+ */
T at(const Index &index) const override { return _values.at(_stride.offset(index)); }
private:
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::Reader struct
+ */
+
#ifndef __NNFW_UTIL_TENSOR_READER_H__
#define __NNFW_UTIL_TENSOR_READER_H__
namespace tensor
{
+/**
+ * @brief Struct to read element of tensor
+ * @tparam T Type of elements in tensor
+ */
template <typename T> struct Reader
{
+ /**
+ * @brief Destroy the Reader object
+ */
virtual ~Reader() = default;
+ /**
+ * @brief Get an element of tensor
+ * @param[in] index Index specifying indexes of tensor element
+   * @return The value of the specified element
+ */
virtual T at(const Index &index) const = 0;
};
* limitations under the License.
*/
+/**
+ * @file Shape.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::Shape class
+ */
+
#ifndef __NNFW_UTIL_TENSOR_SHAPE_H__
#define __NNFW_UTIL_TENSOR_SHAPE_H__
namespace tensor
{
+/**
+ * @brief Class to represent shape of a tensor
+ */
class Shape
{
public:
+ /**
+ * @brief Construct a new Shape object
+ * @param[in] rank Rank of a tensor
+ */
Shape(size_t rank) { _dimensions.resize(rank); }
public:
+ /**
+ * @brief Construct a new Shape object
+ * @param[in] dimensions @c initializer_list<int32_t> of dimensions of tensor
+ */
Shape(const std::initializer_list<int32_t> &dimensions) : _dimensions{dimensions}
{
// DO NOTHING
}
+ /**
+ * @brief Construct a new Shape object
+ * @param[in] origin @c Shape object to copy
+ */
Shape(const Shape &origin) = default;
public:
+ /**
+   * @brief Add dimension to the beginning
+ * @param[in] d dimension to add to the beginning
+ * @return N/A
+ */
void prepend(int32_t d) { _dimensions.emplace_front(d); }
+
+ /**
+   * @brief Add dimension to the back
+ * @param[in] d dimension to add to the back
+ * @return N/A
+ */
void append(int32_t d) { _dimensions.emplace_back(d); }
public:
+ /**
+ * @brief Get the rank of this shape
+ * @return rank
+ */
size_t rank(void) const { return _dimensions.size(); }
public:
+ /**
+ * @brief Get specific dimension
+   * @param[in] n Index of dimension
+ * @return n'th dimension
+ */
int32_t dim(size_t n) const { return _dimensions.at(n); }
+
+ /**
+ * @brief Get the reference of specific dimension
+   * @param[in] n Index of dimension
+ * @return Reference of n'th dimension
+ */
int32_t &dim(size_t n) { return _dimensions.at(n); }
public:
+ /**
+ * @brief Get the number of elements specified by this shape
+ * @return The number of elements
+ */
size_t element_nums() const
{
size_t nums = 1;
std::deque<int32_t> _dimensions;
public:
+ /**
+ * @brief Get a @c Shape object after parsing string
+   * @param[in] s String of dimension list. Accepted format is numbers separated by comma.
+ * @return @c Shape object
+ */
static Shape from(const std::string &s);
};
+/**
+ * @brief Check equality of two @c Shape
+ * @param[in] Shape First shape to compare
+ * @param[in] Shape Second shape to compare
+ * @return @c true if both shapes are equal, otherwise @c false
+ */
bool operator==(const Shape &, const Shape &);
+/**
+ * @brief Send @c Shape to @c std::ostream
+ * @param[in] os @c std::ostream to process this @c Shape
+ * @param[in] shape @c Shape to send to @c ostream
+ * @return Reference of @c std::ostream
+ */
std::ostream &operator<<(std::ostream &os, const Shape &shape);
} // namespace tensor
* limitations under the License.
*/
+/**
+ * @file Zipper.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains nnfw::util::tensor::Zipper class
+ */
+
#ifndef __NNFW_UTIL_TENSOR_ZIPPER_H__
#define __NNFW_UTIL_TENSOR_ZIPPER_H__
namespace tensor
{
+/**
+ * @brief Class to apply a function with three params: @c Index, elements of a tensor
+ * at passed index read by @c Reader objects
+ */
template <typename T> class Zipper
{
public:
+ /**
+ * @brief Construct a new @c Zipper object
+ * @param[in] shape Shape of @c lhs and @c rhs
+ * @param[in] lhs @c Reader object of a tensor
+ * @param[in] rhs @c Reader object of a tensor
+ */
Zipper(const Shape &shape, const Reader<T> &lhs, const Reader<T> &rhs)
: _shape{shape}, _lhs{lhs}, _rhs{rhs}
{
}
public:
+ /**
+ * @brief Apply @c cb to all elements of tensors. Elements of two tensors
+ * at passed @c index are read by @c lhs and @c rhs
+ * @param[in] cb Function to apply
+ * @return N/A
+ */
template <typename Callable> void zip(Callable cb) const
{
iterate(_shape) <<
const Reader<T> &_rhs;
};
+/**
+ * @brief Apply @c cb by using @c lhs and @c rhs passed to the constructor of @c zipper
+ * @param[in] zipper @c Zipper object
+ * @param[in] cb Function to apply using @c zip function
+ * @return @c zipper object after applying @c cb to @c zipper
+ */
template <typename T, typename Callable>
const Zipper<T> &operator<<(const Zipper<T> &zipper, Callable cb)
{
return zipper;
}
+/**
+ * @brief Get @c Zipper object constructed using passed params
+ * @param shape Shape of @c lhs and @c rhs
+ * @param lhs @c Reader object of a tensor
+ * @param rhs @c Reader object of a tensor
+ * @return @c Zipper object
+ */
template <typename T> Zipper<T> zip(const Shape &shape, const Reader<T> &lhs, const Reader<T> &rhs)
{
return Zipper<T>{shape, lhs, rhs};
* limitations under the License.
*/
+/**
+ * @file vector.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains @c == operator to check equality of elements in two vectors
+ */
#ifndef __NNFW_UTIL_VECTOR_H__
#define __NNFW_UTIL_VECTOR_H__
#include <vector>
+/**
+ * @brief Compare elements of two vectors
+ * @tparam T Type of elements in vectors
+ * @param[in] lhs First vector to compare
+ * @param[in] rhs Second vector to compare
+ * @return @c true if all elements are equal, otherwise @c false.
+ */
template <typename T> bool operator==(const std::vector<T> &lhs, const std::vector<T> &rhs)
{
if (lhs.size() != rhs.size())
* limitations under the License.
*/
+/**
+ * @file Object.h
+ * @brief This file contains Object class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_VECTOR_OBJECT_H__
#define __NNFW_UTIL_VECTOR_OBJECT_H__
namespace vector
{
+/**
+ * @brief Class to have information of the operand for vector
+ */
template <typename T> class Object final : public Reader<T>
{
public:
using Generator = std::function<T(int32_t size, int32_t offset)>;
public:
+ /**
+ * @brief Construct Object object with size of vector and set value used by Generator
+ * @param[in] size The size of vector
+ * @param[in] gen A function to set values of operand tensor
+ */
Object(int32_t size, const Generator &gen) : _size{size}
{
_value.resize(_size);
}
public:
+ /**
+ * @brief Get size of vector
+ * @return Size of vector
+ */
int32_t size(void) const { return _size; }
public:
+ /**
+ * @brief Get the value used by index
+ * @param[in] nth The vector index
+ * @return The value at the offset
+ */
T at(uint32_t nth) const override { return _value.at(nth); }
private:
+ /**
+ * @brief Size of vector
+ */
const int32_t _size;
+ /**
+ * @brief The tensor vector of operand
+ */
std::vector<T> _value;
};
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @brief This file contains Reader class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_UTIL_VECTOR_READER_H__
#define __NNFW_UTIL_VECTOR_READER_H__
namespace vector
{
+/**
+ * @brief Interface class to read values of a vector
+ */
template <typename T> struct Reader
{
+ /**
+ * @brief Destruct Reader object using default destructor
+ */
virtual ~Reader() = default;
+ /**
+ * @brief Get the value used by the index
+ * @param[in] nth The vector index
+ * @return The value at the offset
+ */
virtual T at(uint32_t nth) const = 0;
};
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLKernelLibraryEx.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file is a cloned version of CLKernelLibrary.h in ACL. This file defines
+ * an interface for CLKernelLibrary.cpp which adds more OpenCL kernels on top of ACL.
+ */
+
#ifndef __ARM_COMPUTE_CLKERNELLIBRARY_EX_H__
#define __ARM_COMPUTE_CLKERNELLIBRARY_EX_H__
namespace arm_compute
{
-/** CLKernelLibrary class */
+/**
+ * @brief Class to build OpenCL kernels added from nnfw
+ */
class CLKernelLibraryEx
{
using StringSet = std::set<std::string>;
private:
- /** Default Constructor. */
+ /**
+ * @brief Construct a new CLKernelLibraryEx object
+ */
CLKernelLibraryEx();
public:
- /** Prevent instances of this class from being copied */
+ /**
+ * @brief Prevent instances of this class from being copied.
+ */
CLKernelLibraryEx(const CLKernelLibraryEx &) = delete;
- /** Prevent instances of this class from being copied */
+
+ /**
+ * @brief Prevent instances of this class from being copied.
+ */
const CLKernelLibraryEx &operator=(const CLKernelLibraryEx &) = delete;
- /** Access the KernelLibrary singleton.
- * @return The KernelLibrary instance.
+
+ /**
+ * @brief Get the KernelLibrary singleton.
+ * @return The KernelLibrary instance
*/
static CLKernelLibraryEx &get();
- /** Initialises the kernel library.
- *
+
+ /**
+ * @brief Initialise the kernel library.
* @param[in] kernel_path (Optional) Path of the directory from which kernel sources are loaded.
* @param[in] context (Optional) CL context used to create programs.
* @param[in] device (Optional) CL device for which the programs are created.
+ * @return N/A
*/
void init(std::string kernel_path = ".", cl::Context context = cl::Context::getDefault(),
cl::Device device = cl::Device::getDefault())
_context = std::move(context);
_device = std::move(device);
}
- /** Sets the path that the kernels reside in.
- *
- * @param[in] kernel_path Path of the kernel.
+
+ /**
+ * @brief Set the path that the kernels reside in.
+ * @param[in] kernel_path Path of the directory from which kernel sources are loaded.
+ * @return N/A
*/
void set_kernel_path(const std::string &kernel_path) { _kernel_path = kernel_path; };
- /** Gets the path that the kernels reside in.
+
+ /**
+ * @brief Get the path that the kernels reside in.
+ * @return the path of kernel files
*/
std::string get_kernel_path() { return _kernel_path; };
- /** Gets the source of the selected program.
- *
+
+ /**
+ * @brief Get the source of the selected program.
* @param[in] program_name Program name.
- *
* @return Source of the selected program.
*/
std::string get_program_source(const std::string &program_name);
- /** Sets the CL context used to create programs.
- *
+
+ /**
+ * @brief Set the CL context used to create programs.
* @note Setting the context also resets the device to the
* first one available in the new context.
- *
* @param[in] context A CL context.
+ * @return N/A
*/
void set_context(cl::Context context)
{
}
}
- /** Accessor for the associated CL context.
- *
+ /**
+ * @brief Return associated CL context.
* @return A CL context.
*/
cl::Context &context() { return _context; }
- /** Sets the CL device for which the programs are created.
- *
+ /**
+ * @brief Set the CL device for which the programs are created.
* @param[in] device A CL device.
+ * @return N/A
*/
void set_device(cl::Device device) { _device = std::move(device); }
- /** Return the device version
- *
+ /**
+ * @brief Return the device version
* @return The content of CL_DEVICE_VERSION
*/
std::string get_device_version();
- /** Creates a kernel from the kernel library.
- *
+
+ /**
+ * @brief Create a kernel from the kernel library.
* @param[in] kernel_name Kernel name.
* @param[in] build_options_set Kernel build options as a set.
- *
* @return The created kernel.
*/
Kernel create_kernel(const std::string &kernel_name,
const StringSet &build_options_set = {}) const;
- /** Find the maximum number of local work items in a workgroup can be supported for the kernel.
- *
+
+ /**
+ * @brief Find the maximum number of local work items in a workgroup can be supported for the
+ * kernel.
+ * @param[in] kernel kernel object
*/
+
size_t max_local_workgroup_size(const cl::Kernel &kernel) const;
- /** Return the default NDRange for the device.
- *
+ /**
+ * @brief Return the default NDRange for the device.
+   * @return default NDRange of the device
*/
cl::NDRange default_ndrange() const;
- /** Clear the library's cache of binary programs
+ /**
+ * @brief Clear the library's cache of binary programs
+ * @return N/A
*/
void clear_programs_cache()
{
_built_programs_map.clear();
}
- /** Access the cache of built OpenCL programs */
+ /**
+ * @brief Access the cache of built OpenCL programs
+ * @return program map data structure of which key is name of kernel and value is
+   *         kernel source name. (*.cl)
+ */
const std::map<std::string, cl::Program> &get_built_programs() const
{
return _built_programs_map;
}
- /** Add a new built program to the cache
- *
+ /**
+ * @brief Add a new built program to the cache
* @param[in] built_program_name Name of the program
* @param[in] program Built program to add to the cache
+ * @return N/A
*/
void add_built_program(const std::string &built_program_name, cl::Program program);
private:
- /** Load program and its dependencies.
- *
+ /**
+ * @brief Load program and its dependencies.
* @param[in] program_name Name of the program to load.
*/
const Program &load_program(const std::string &program_name) const;
- /** Concatenates contents of a set into a single string.
- *
+ /**
+ * @brief Concatenates contents of a set into a single string.
* @param[in] s Input set to concatenate.
- *
* @return Concatenated string.
*/
std::string stringify_set(const StringSet &s) const;
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLCastKernel.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines CLCastKernel class
+ */
+
#ifndef __ARM_COMPUTE_CLCASTKERNEL_H__
#define __ARM_COMPUTE_CLCASTKERNEL_H__
{
class ICLTensor;
-/** OpenCL kernel to perform a cast operation */
+/**
+ * @brief Class to define OpenCL kernel for cast operation
+ */
class CLCastKernel : public ICLKernel
{
public:
- /** Default constructor */
+ /**
+ * @brief Construct CLCastKernel object
+ */
CLCastKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLCastKernel(const CLCastKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLCastKernel &operator=(const CLCastKernel &) = delete;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Construct CLCastKernel object using default move constructor
+ * @param[in] CLCastKernel object to move
+ */
CLCastKernel(CLCastKernel &&) = default;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param[in] CLCastKernel object to move
+ */
CLCastKernel &operator=(CLCastKernel &&) = default;
- /** Default destructor */
+
+ /**
+ * @brief Destruct this CLCastKernel object
+ */
~CLCastKernel() = default;
- /** Initialise the kernel's input and output.
- *
+
+ /**
+ * @brief Initialise the kernel's input and output.
* @param[in] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32.
* @param[in] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32.
+ * @return N/A
*/
void configure(const ICLTensor *input, ICLTensor *output);
- // Inherited methods overridden:
+ /**
+ * @brief Enqueue the OpenCL kernel to process the given window on the passed OpenCL command
+ * queue.
+ * @note The queue is *not* flushed by this method, and therefore the kernel will not have
+ * been executed by the time this method returns.
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of
+ * the window returned by window()).
+   * @param[in,out] queue  Command queue on which to enqueue the kernel.
+   * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLGatherKernel.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines CLGatherKernel class
+ */
+
#ifndef __ARM_COMPUTE_CLGATHERKERNEL_H__
#define __ARM_COMPUTE_CLGATHERKERNEL_H__
{
class ICLTensor;
-/** Interface for the gather kernel.
- *
+/**
+ * @brief Class to define an interface for the gather kernel.
*/
class CLGatherKernel : public ICLKernel
{
public:
- /** Default constructor.*/
+ /**
+ * @brief Construct CLGatherKernel object
+   */
CLGatherKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers). */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ */
CLGatherKernel(const CLGatherKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers). */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ */
CLGatherKernel &operator=(const CLGatherKernel &) = delete;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Construct CLGatherKernel object by using default move constructor
+ * @param[in] CLGatherKernel object to move
+ */
CLGatherKernel(CLGatherKernel &&) = default;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Move assignment operator
+ * @param[in] CLGatherKernel object to move
+ */
CLGatherKernel &operator=(CLGatherKernel &&) = default;
- /** Initialise the kernel's input, output and border mode.
- *
+
+ /**
+ * @brief Initialise the kernel's input, output and border mode.
* @param[in] input1 An input tensor. Data types supported: U8/S32/F32.
* @param[in] input2 An input tensor. Data types supported: S32.
* @param[out] output The output tensor, Data types supported: same as @p input1.
+ * @return N/A
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref
+
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration of @ref
* CLGatherKernel
- *
* @param[in] input1 An input tensor. Data types supported: U8/S32/F32.
* @param[in] input2 An input tensor. Data types supported: S32.
* @param[out] output The output tensor, Data types supported: same as @p input1.
- *
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2,
const ITensorInfo *output);
- // Inherited methods overridden:
+ /**
+ * @brief Enqueue the OpenCL kernel to process the given window on the passed OpenCL command
+ * queue.
+ * @note The queue is *not* flushed by this method, and therefore the kernel will not have
+ * been executed by the time this method returns.
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of
+ * the window returned by window()).
+   * @param[in,out] queue  Command queue on which to enqueue the kernel.
+   * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLPixelWiseDivisionKernel.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines CLPixelWiseDivisionKernel class
+ */
+
#ifndef __ARM_COMPUTE_CLPIXELWISEDIVISIONKERNEL_H__
#define __ARM_COMPUTE_CLPIXELWISEDIVISIONKERNEL_H__
{
class ICLTensor;
-/** Interface for the pixelwise division kernel.
- *
+/**
+ * @brief Interface for the pixelwise division kernel.
*/
class CLPixelWiseDivisionKernel : public ICLKernel
{
public:
- /** Default constructor.*/
+ /**
+ * @brief Construct a CLPixelWiseDivisionKernel object
+ */
CLPixelWiseDivisionKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers). */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ */
CLPixelWiseDivisionKernel(const CLPixelWiseDivisionKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers). */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ */
CLPixelWiseDivisionKernel &operator=(const CLPixelWiseDivisionKernel &) = delete;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Construct a CLPixelWiseDivisionKernel object by using move constructor
+ * @param[in] CLPixelWiseDivisionKernel object to move
+ */
CLPixelWiseDivisionKernel(CLPixelWiseDivisionKernel &&) = default;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param[in] CLPixelWiseDivisionKernel object to move
+ */
CLPixelWiseDivisionKernel &operator=(CLPixelWiseDivisionKernel &&) = default;
- /** Initialise the kernel's input, output and border mode.
- *
+
+ /**
+ * @brief Initialise the kernel's input, output and border mode.
* @param[in] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
* @param[in] input2 An input tensor. Data types supported: same as @p input1.
* @param[out] output The output tensor, Data types supported: same as @p input1. Note:
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest
* even.
+ * @return N/A
*/
void configure(const ICLTensor *input1, const ICLTensor *input2, ICLTensor *output, float scale,
ConvertPolicy overflow_policy, RoundingPolicy rounding_policy);
- /** Static function to check if given info will lead to a valid configuration of @ref
+
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration of @ref
* CLPixelWiseDivisionKernel
- *
* @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
* @param[in] input2 An input tensor info. Data types supported: same as @p input1.
* @param[in] output The output tensor info, Data types supported: same as @p input1.
* where n is between 0 and 15. For QS8 and QS16 scale must be 1.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
- *
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2,
const ITensorInfo *output, float scale, ConvertPolicy overflow_policy,
RoundingPolicy rounding_policy);
- // Inherited methods overridden:
+ /**
+ * @brief Enqueue the OpenCL kernel to process the given window on the passed OpenCL command
+ * queue.
+ * @note The queue is *not* flushed by this method, and therefore the kernel will not have
+ * been executed by the time this method returns.
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of
+ * the window returned by window()).
+   * @param[in,out] queue  Command queue on which to enqueue the kernel.
+   * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
+
+ /**
+ * @brief The size of the border for that kernel
+ * @return The width in number of elements of the border.
+ */
BorderSize border_size() const override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLReduceMaxKernel.h
+ * @brief This file defines CLReduceMaxKernel
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __ARM_COMPUTE_CLREDUCEMAXKERNEL_H__
#define __ARM_COMPUTE_CLREDUCEMAXKERNEL_H__
{
class ICLTensor;
-/** Interface for the pixelwise division kernel.
- *
+/**
+ * @brief Class to define interface for the reduce max kernel.
*/
class CLReduceMaxKernel : public ICLKernel
{
public:
- /** Default constructor.*/
+ /**
+ * @brief Default constructor.
+ */
CLReduceMaxKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers). */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLReduceMaxKernel to be copied
+ */
CLReduceMaxKernel(const CLReduceMaxKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers). */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLReduceMaxKernel to be copied
+ * @return Reference of this instance
+ */
CLReduceMaxKernel &operator=(const CLReduceMaxKernel &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLReduceMaxKernel to be moved
+ */
CLReduceMaxKernel(CLReduceMaxKernel &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLReduceMaxKernel to be moved
+ * @return Reference of this instance
+ */
CLReduceMaxKernel &operator=(CLReduceMaxKernel &&) = default;
- /** Initialise the kernel's input, output and border mode.
- *
- * @param[in] input An input tensor. Data types supported: QASYMM8/S32/F32.
- * @param[in] reduce_axis Axises to reduce
- * @param[out] output The output tensor, Data types supported: same as @p input.
+ /**
+ * @brief Initialise the kernel's input, output and border mode.
+ * @param[in] input An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] reduce_axis Axis to reduce
+   * @param[out] output      The output tensor, Data types supported: same as @p input. Note:
+   *                         U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
+   * @return N/A
*/
void configure(const ICLTensor *input, std::vector<uint32_t> reduce_axis, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration of @ref
* CLReduceMaxKernel
- *
- * @param[in] input An input tensor info. Data types supported: QASYMM8/S32/F32.
- * @param[in] reduce_axis Axises to reduce
- * @param[in] output The output tensor info, Data types supported: same as @p input.
- *
+ * @param[in] input An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] reduce_axis Axis to reduce
+   * @param[in] output      The output tensor info, Data types supported: same as @p input.
+ * Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
* @return a status
*/
static Status validate(const ITensorInfo *input, const std::vector<uint32_t> &reduce_axis,
const ITensorInfo *output);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLReduceMaxKernel op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
+ /**
+ * @brief Run CLReduceMaxKernel op on CPU
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run_on_cpu(cl::CommandQueue &queue);
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLReductionMeanKernel.h
+ * @brief This file defines CLReductionMeanKernel class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __ARM_COMPUTE_CLREDUCTIONMEANKERNEL_H__
#define __ARM_COMPUTE_CLREDUCTIONMEANKERNEL_H__
{
class ICLTensor;
-/** Interface for the reduction operation kernel */
+/**
+ * @brief Class to define interface for the reduction operation kernel
+ */
class CLReductionMeanKernel : public ICLKernel
{
public:
- /** Default constructor */
+ /**
+ * @brief Default constructor
+ */
CLReductionMeanKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLReductionMeanKernel(const CLReductionMeanKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLReductionMeanKernel &operator=(const CLReductionMeanKernel &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ */
CLReductionMeanKernel(CLReductionMeanKernel &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ */
CLReductionMeanKernel &operator=(CLReductionMeanKernel &&) = default;
- /** Default destructor */
+ /**
+ * @brief Default destructor
+ */
~CLReductionMeanKernel() = default;
- /** Set the input and output tensors.
- *
+ /**
+ * @brief Set the input and output tensors.
* @param[in] input Source tensor. Data types supported: F32. Data layouts supported: NCHW.
* @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0, 1
+ * @return N/A
*/
void configure(const ICLTensor *input, ICLTensor *output, std::vector<uint32_t> axis);
- /** Static function to check if given info will lead to a valid configuration of @ref
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration of @ref
* CLReductionMeanKernel.
- *
* @param[in] input Source tensor info. Data types supported: F32. Data layouts supported: NCHW.
* @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p
* input.
* Output will have the same number of dimensions as input.
* @param[in] axis Axis along which to reduce. Supported reduction axis : 0, 1
- *
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output,
std::vector<uint32_t> axis);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLReductionMeanKernel op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue CLQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
+ /**
+ * @brief Get border size as BorderSize
+ * @return border size as BorderSize
+ */
BorderSize border_size() const override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLStridedSliceKernel.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines CLStridedSliceKernel class
+ */
+
#ifndef __ARM_COMPUTE_CLSTRIDEDSLICEKERNEL_H__
#define __ARM_COMPUTE_CLSTRIDEDSLICEKERNEL_H__
{
class ICLTensor;
-/** Interface for the kernel to extract a strided slice of a tensor */
+/**
+ * @brief Class to define an interface for the kernel to extract a strided slice of a tensor
+ */
class CLStridedSliceKernel : public ICLKernel
{
public:
- /** Default constructor */
+ /**
+ * @brief Construct a CLStridedSliceKernel object
+ * */
CLStridedSliceKernel();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ * */
CLStridedSliceKernel(const CLStridedSliceKernel &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ * */
CLStridedSliceKernel &operator=(const CLStridedSliceKernel &) = delete;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Construct a CLStridedSliceKernel object by using default move constructor
+ * @param[in] CLStridedSliceKernel object to move
+ * */
CLStridedSliceKernel(CLStridedSliceKernel &&) = default;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Move assignment operator
+ * @param[in] CLStridedSliceKernel object to move
+ * */
CLStridedSliceKernel &operator=(CLStridedSliceKernel &&) = default;
- /** Default destructor */
+
+ /**
+ * @brief Destruct this object
+ * */
~CLStridedSliceKernel() = default;
- /** Set the input and output of the kernel
- *
+
+ /**
+ * @brief Set the input and output of the kernel
* @param[in] input Source tensor. Data type supported:
* U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/F16/F32
* @param[out] output Destination tensor. Data type supported: Same as @p input
* @param[in] beginMask Mask for begin
* @param[in] endMask Mask for end
* @param[in] shrinkAxisMask Mask for shrink axis.
- *
+ * @return N/A
*/
void configure(const ICLTensor *input, ICLTensor *output, ICLTensor *beginData,
ICLTensor *endData, ICLTensor *stridesData, int32_t beginMask, int32_t endMask,
int32_t shrinkAxisMask);
- /** Static function to check if given info will lead to a valid configuration of @ref
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration of @ref
* CLStridedSliceKernel
- *
* @param[in] input The input tensor info. Data types supported:
* U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/F16/F32
* @param[in] output The output tensor info, Data types supported: same as @p input1.
* @param[in] beginMask Mask for begin
* @param[in] endMask Mask for end
* @param[in] shrinkAxisMask Mask for shrink axis.
- *
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output,
const ITensorInfo *stride, int32_t beginMask, int32_t endMask,
int32_t shrinkAxisMask);
- // Inherited methods overridden:
+ /**
+ * @brief Enqueue the OpenCL kernel to process the given window on the passed OpenCL command
+ * queue.
+ * @note The queue is *not* flushed by this method, and therefore the kernel will not have
+ * been executed by the time this method returns.
+ * @param[in] window Region on which to execute the kernel. (Must be a valid region of
+ * the window returned by window()).
+ * @param[in,out] queue Command queue on which to enqueue the kernel.
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLTopKV2Kernel.h
+ * @brief This file defines classes for TopKV2Kernel
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __ARM_COMPUTE_CLTOPKV2KERNEL_H__
#define __ARM_COMPUTE_CLTOPKV2KERNEL_H__
{
class ICLTensor;
+/**
+ * @brief Class to define CLTopKV2Single
+ */
class CLTopKV2Single : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLTopKV2Single();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2Single to be copied
+ */
CLTopKV2Single(const CLTopKV2Single &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2Single to be copied
+ * @return Reference of this instance
+ */
CLTopKV2Single &operator=(const CLTopKV2Single &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2Single to be moved
+ */
CLTopKV2Single(CLTopKV2Single &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2Single to be moved
+ * @return Reference of this instance
+ */
CLTopKV2Single &operator=(CLTopKV2Single &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[in] input An input tensor
+ * @param[in] topk_values Values of the top k predictions
+ * @param[in] topk_indices Indices of the top k predictions
+ * @param[in] indices Indices
+ * @param[in] temp_stack Temp stack
+ * @param[in] k K of the top k predictions
+ * @param[in] n Number times to quick-sort
+ * @return N/A
+ */
void configure(ICLTensor *input, ICLTensor *topk_values, ICLTensor *topk_indices,
cl::Buffer *indices, cl::Buffer *temp_stack, int k, int n);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLTopKV2Single op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
ICLTensor *_topk_indices;
};
+/**
+ * @brief Class to define CLTopKV2Init
+ */
class CLTopKV2Init : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLTopKV2Init();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2Init to be copied
+ */
CLTopKV2Init(const CLTopKV2Init &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2Init to be copied
+ * @return Reference of this instance
+ */
CLTopKV2Init &operator=(const CLTopKV2Init &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2Init to be moved
+ */
CLTopKV2Init(CLTopKV2Init &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2Init to be moved
+ * @return Reference of this instance
+ */
CLTopKV2Init &operator=(CLTopKV2Init &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[in] input An input tensor
+ * @param[in] in_key_buf Buffer of input key
+ * @param[in] in_ind_buf Buffer of input index
+ * @param[in] n Number times to quick-sort
+ * @return N/A
+ */
void configure(ICLTensor *input, cl::Buffer *in_key_buf, cl::Buffer *in_ind_buf, int n);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLTopKV2Init op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
ICLTensor *_input;
};
+/**
+ * @brief Class to define CLRadixSortHistogram
+ */
class CLRadixSortHistogram : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLRadixSortHistogram();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortHistogram to be copied
+ */
CLRadixSortHistogram(const CLRadixSortHistogram &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortHistogram to be copied
+ * @return Reference of this instance
+ */
CLRadixSortHistogram &operator=(const CLRadixSortHistogram &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortHistogram to be moved
+ */
CLRadixSortHistogram(CLRadixSortHistogram &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortHistogram to be moved
+ * @return Reference of this instance
+ */
CLRadixSortHistogram &operator=(CLRadixSortHistogram &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] hist_buf Buffer of histogram
+ * @param[in] bits Number of bits to be used for radix sort
+ * @param[in] n Integer number size to sort
+ * @return N/A
+ */
void configure(cl::Buffer *hist_buf, int bits, int n);
+ /**
+ * @brief Set pass
+ * @param[in] pass Passes made of in radix sort algorithm
+ * @param[in] in_key_buf Buffer of input key
+ * @return N/A
+ */
void setPass(int pass, cl::Buffer *in_key_buf)
{
_pass = pass;
_in_key_buf = in_key_buf;
}
- // Inherited methods overridden:
+ /**
+ * @brief Run CLRadixSortHistogram op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
cl::Buffer *_in_key_buf;
};
+/**
+ * @brief Class to define CLRadixSortScanHistogram
+ */
class CLRadixSortScanHistogram : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLRadixSortScanHistogram();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortScanHistogram to be copied
+ */
CLRadixSortScanHistogram(const CLRadixSortScanHistogram &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortScanHistogram to be copied
+ * @return Reference of this instance
+ */
CLRadixSortScanHistogram &operator=(const CLRadixSortScanHistogram &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortScanHistogram to be moved
+ */
CLRadixSortScanHistogram(CLRadixSortScanHistogram &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortScanHistogram to be moved
+ * @return Reference of this instance
+ */
CLRadixSortScanHistogram &operator=(CLRadixSortScanHistogram &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] hist_buf Buffer of histogram
+ * @param[out] glob_sum_buf Buffer of global sum
+ * @param[in] bits Number of bits to be used for radix sort
+ * @return N/A
+ */
void configure(cl::Buffer *hist_buf, cl::Buffer *glob_sum_buf, int bits);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLRadixSortScanHistogram op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
};
+/**
+ * @brief Class to define CLRadixSortGlobalScanHistogram
+ */
class CLRadixSortGlobalScanHistogram : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLRadixSortGlobalScanHistogram();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortGlobalScanHistogram to be copied
+ */
CLRadixSortGlobalScanHistogram(const CLRadixSortGlobalScanHistogram &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortGlobalScanHistogram to be copied
+ * @return Reference of this instance
+ */
CLRadixSortGlobalScanHistogram &operator=(const CLRadixSortGlobalScanHistogram &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortGlobalScanHistogram to be moved
+ */
CLRadixSortGlobalScanHistogram(CLRadixSortGlobalScanHistogram &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortGlobalScanHistogram to be moved
+ * @return Reference of this instance
+ */
CLRadixSortGlobalScanHistogram &operator=(CLRadixSortGlobalScanHistogram &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] glob_sum_buf Buffer of global sum
+ * @param[out] temp_buf Temp buffer to be used while RadixSortGlobalScanHistogram
+ * @param[in] bits Number of bits to be used for radix sort
+ * @return N/A
+ */
void configure(cl::Buffer *glob_sum_buf, cl::Buffer *temp_buf, int bits);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLRadixSortGlobalScanHistogram op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
};
+/**
+ * @brief Class to define CLRadixSortPasteHistogram
+ */
class CLRadixSortPasteHistogram : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLRadixSortPasteHistogram();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortPasteHistogram to be copied
+ */
CLRadixSortPasteHistogram(const CLRadixSortPasteHistogram &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortPasteHistogram to be copied
+ * @return Reference of this instance
+ */
CLRadixSortPasteHistogram &operator=(const CLRadixSortPasteHistogram &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortPasteHistogram to be moved
+ */
CLRadixSortPasteHistogram(CLRadixSortPasteHistogram &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortPasteHistogram to be moved
+ * @return Reference of this instance
+ */
CLRadixSortPasteHistogram &operator=(CLRadixSortPasteHistogram &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] hist_buf Buffer of histogram
+ * @param[out] glob_sum_buf Buffer of global sum
+ * @param[in] bits Number of bits to be used for radix sort
+ * @return N/A
+ */
void configure(cl::Buffer *hist_buf, cl::Buffer *glob_sum_buf, int bits);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLRadixSortPasteHistogram op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
};
+/**
+ * @brief Class to define CLRadixSortReorder
+ */
class CLRadixSortReorder : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLRadixSortReorder();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortReorder to be copied
+ */
CLRadixSortReorder(const CLRadixSortReorder &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLRadixSortReorder to be copied
+ * @return Reference of this instance
+ */
CLRadixSortReorder &operator=(const CLRadixSortReorder &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortReorder to be moved
+ */
CLRadixSortReorder(CLRadixSortReorder &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLRadixSortReorder to be moved
+ * @return Reference of this instance
+ */
CLRadixSortReorder &operator=(CLRadixSortReorder &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] hist_buf Buffer of histogram
+ * @param[in] bits Number of bits to be used for radix sort
+ * @param[in] n Integer number size to sort
+ * @return N/A
+ */
void configure(cl::Buffer *hist_buf, int bits, int n);
+ /**
+ * @brief Set pass
+ * @param[in] pass Passes made of in radix sort algorithm
+ * @param[in] in_key_buf Buffer of input key
+ * @param[out] out_key_buf Buffer of output key
+ * @param[in] in_ind_buf Buffer of input index
+ * @param[out] out_ind_buf Buffer of output index
+ * @return N/A
+ */
void setPass(int pass, cl::Buffer *in_key_buf, cl::Buffer *out_key_buf, cl::Buffer *in_ind_buf,
cl::Buffer *out_ind_buf)
{
_in_ind_buf = in_ind_buf;
_out_ind_buf = out_ind_buf;
}
- // Inherited methods overridden:
+ /**
+ * @brief Run CLRadixSortReorder op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
cl::Buffer *_out_ind_buf;
};
+/**
+ * @brief Class to define CLTopKV2FindFirstNegative
+ */
class CLTopKV2FindFirstNegative : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLTopKV2FindFirstNegative();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2FindFirstNegative to be copied
+ */
CLTopKV2FindFirstNegative(const CLTopKV2FindFirstNegative &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2FindFirstNegative to be copied
+ * @return Reference of this instance
+ */
CLTopKV2FindFirstNegative &operator=(const CLTopKV2FindFirstNegative &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2FindFirstNegative to be moved
+ */
CLTopKV2FindFirstNegative(CLTopKV2FindFirstNegative &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2FindFirstNegative to be moved
+ * @return Reference of this instance
+ */
CLTopKV2FindFirstNegative &operator=(CLTopKV2FindFirstNegative &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] first_negative_idx_buf Buffer of the first negative index
+ * @param[in] n Number times to find
+ * @return N/A
+ */
void configure(cl::Buffer *first_negative_idx_buf, int n);
+ /**
+ * @brief Set output buffer
+ * @param[out] out_key_buf Buffer of output key
+ * @return N/A
+ */
void setOutputBuffer(cl::Buffer *out_key_buf) { _out_key_buf = out_key_buf; }
- // Inherited methods overridden:
+ /**
+ * @brief Run CLTopKV2FindFirstNegative op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
cl::Buffer *_out_key_buf;
};
+/**
+ * @brief Class to define CLTopKV2ReorderNegatives
+ */
class CLTopKV2ReorderNegatives : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLTopKV2ReorderNegatives();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2ReorderNegatives to be copied
+ */
CLTopKV2ReorderNegatives(const CLTopKV2ReorderNegatives &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2ReorderNegatives to be copied
+ * @return Reference of this instance
+ */
CLTopKV2ReorderNegatives &operator=(const CLTopKV2ReorderNegatives &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2ReorderNegatives to be moved
+ */
CLTopKV2ReorderNegatives(CLTopKV2ReorderNegatives &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2ReorderNegatives to be moved
+ * @return Reference of this instance
+ */
CLTopKV2ReorderNegatives &operator=(CLTopKV2ReorderNegatives &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] first_negative_idx_buf Buffer of the first negative index
+ * @param[in] n Number times to find
+ * @return N/A
+ */
void configure(cl::Buffer *first_negative_idx_buf, int n);
+ /**
+ * @brief Set buffers
+ * @param[in] in_key_buf Buffer of input key
+ * @param[out] out_key_buf Buffer of output key
+ * @param[in] in_ind_buf Buffer of input index
+ * @param[out] out_ind_buf Buffer of output index
+ * @return N/A
+ */
void setBuffers(cl::Buffer *in_key_buf, cl::Buffer *out_key_buf, cl::Buffer *in_ind_buf,
cl::Buffer *out_ind_buf)
{
_out_ind_buf = out_ind_buf;
}
- // Inherited methods overridden:
+ /**
+ * @brief Run CLTopKV2ReorderNegatives op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
cl::Buffer *_out_ind_buf;
};
+/**
+ * @brief Class to define CLTopKV2Store
+ */
class CLTopKV2Store : public ICLKernel
{
public:
- /** Constructor */
+ /**
+ * @brief Constructor
+ */
CLTopKV2Store();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2Store to be copied
+ */
CLTopKV2Store(const CLTopKV2Store &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers).
+ * @param [in] copiedInstance Const reference of CLTopKV2Store to be copied
+ * @return Reference of this instance
+ */
CLTopKV2Store &operator=(const CLTopKV2Store &) = delete;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2Store to be moved
+ */
CLTopKV2Store(CLTopKV2Store &&) = default;
- /** Allow instances of this class to be moved */
+ /**
+ * @brief Allow instances of this class to be moved
+ * @param [in] movedInstance Rvalue reference of CLTopKV2Store to be moved
+ * @return Reference of this instance
+ */
CLTopKV2Store &operator=(CLTopKV2Store &&) = default;
+ /**
+ * @brief Initialise kernel with params
+ * @param[out] values Values tensor to store
+ * @param[out] indices Indices tensor to be used for store
+ * @param[in] k K of the top k predictions
+ * @param[in] n Number times to store
+ * @return N/A
+ */
void configure(ICLTensor *values, ICLTensor *indices, int k, int n);
+ /**
+ * @brief Set buffers
+ * @param[out] out_key_buf Buffer of output key
+ * @param[out] out_ind_buf Buffer of output index
+ * @return N/A
+ */
void setOutputBuffers(cl::Buffer *out_key_buf, cl::Buffer *out_ind_buf);
- // Inherited methods overridden:
+ /**
+ * @brief Run CLTopKV2Store op
+ * @param[in] window Window to be used for in_slice
+ * @param[in] queue cl::CommandQueue
+ * @return N/A
+ */
void run(const Window &window, cl::CommandQueue &queue) override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLCast.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains arm_compute::CLCast class
+ */
+
#ifndef __ARM_COMPUTE_CLCAST_H__
#define __ARM_COMPUTE_CLCAST_H__
{
class ICLTensor;
-/** Basic function to run @ref CLCastKernel
- *
- * @note The tensor data type for the inputs must be U8/QASYMM8/S16/S32/F16/F32.
- * @note The function converts the input tensor to the tensor of the output tensor's type.
+/**
+ * @brief Class to run @ref CLCastKernel.
+ * This converts the input tensor to the tensor of the output tensor's type.
*/
class CLCast : public ICLSimpleFunction
{
public:
- /** Initialise the kernel's input and output.
- *
- * @param[in, out] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32.
- * The input tensor is [in, out] because its TensorInfo might be modified
- * inside the kernel.
- * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32.
+ /**
+ * @brief Initialise the kernel's input and output
+ * @param[in, out] input Input tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32.
+ * The input tensor is [in, out] because its TensorInfo might be
+ * modified inside the kernel.
+ * @param[out] output Output tensor. Data types supported: U8/QASYMM8/S16/S32/F16/F32.
*/
void configure(ICLTensor *input, ICLTensor *output);
};
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLGather.h
+ * @brief This file contains CLGather class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __ARM_COMPUTE_CLGATHER_H__
#define __ARM_COMPUTE_CLGATHER_H__
{
class ICLTensor;
-/** Basic function to run @ref CLGatherKernel. */
+/**
+ * @brief Class to run @ref CLGatherKernel.
+ */
class CLGather : public ICLSimpleFunction
{
public:
- /** Initialise the kernel's inputs, output and convertion policy.
- *
- * @param[in] input1 An input tensor. Data types supported: U8/S32/F32.
- * @param[in] input2 An indexes tensor. Data types supported: S32.
- * @param[out] output The output tensor, Data types supported: same as @p input1.
- */
+ /**
+ * @brief Initialise the kernel's inputs, output and conversion policy.
+ * @param[in] input1 An input tensor. Data types supported: U8/S32/F32.
+ * @param[in] input2 An indexes tensor. Data types supported: S32.
+ * @param[out] output The output tensor, Data types supported: same as @p input1.
+ * @return N/A
+ */
void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref CLGather
- *
- * @param[in] input1 An input tensor. Data types supported: U8/S32/F32.
- * @param[in] input2 An indexes tensor. Data types supported: S32.
- * @param[out] output The output tensor, Data types supported: same as @p input1.
+
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration
+ * of @ref CLGather
+ * @param[in] input1 An input tensor. Data types supported: U8/S32/F32.
+ * @param[in] input2 An indexes tensor. Data types supported: S32.
+ * @param[out] output The output tensor, Data types supported: same as @p input1.
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLPixelWiseDivision.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains arm_compute::CLPixelWiseDivision class
+ */
#ifndef __ARM_COMPUTE_CLPIXELWISEDIVISION_H__
#define __ARM_COMPUTE_CLPIXELWISEDIVISION_H__
{
class ICLTensor;
-/** Basic function to run @ref CLPixelWiseDivisionKernel. */
+/**
+ * @brief Class to run @ref CLPixelWiseDivisionKernel.
+ */
class CLPixelWiseDivision : public ICLSimpleFunction
{
public:
- /** Initialise the kernel's inputs, output and convertion policy.
- *
- * @param[in, out] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ /**
+ * @brief Initialise the kernel's inputs, output and conversion policy.
+ * @param[in, out] input1 An input tensor. Data types supported: U8/QS8/QS16/S16/F16/F32
* The input tensor is [in, out] because its TensorInfo might be
* modified inside the kernel in case of broadcasting of dimension 0.
* @param[in, out] input2 An input tensor. Data types supported: same as @p input1.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest
* even.
+ * @return N/A
*/
void configure(ICLTensor *input1, ICLTensor *input2, ICLTensor *output, float scale = 1.f,
ConvertPolicy overflow_policy = ConvertPolicy::WRAP,
RoundingPolicy rounding_policy = RoundingPolicy::TO_ZERO);
- /** Static function to check if given info will lead to a valid configuration of @ref
+
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration of @ref
* CLPixelWiseDivision
- *
- * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32.
+ * @param[in] input1 An input tensor info. Data types supported: U8/QS8/QS16/S16/F16/F32
* @param[in] input2 An input tensor info. Data types supported: same as @p input1.
* @param[in] output The output tensor info, Data types supported: same as @p input1.
* Note: U8 (QS8, QS16) requires both inputs to be U8 (QS8, QS16).
* where n is between 0 and 15. For QS8 and QS16 scale must be 1.
* @param[in] overflow_policy Overflow policy. Supported overflow policies: Wrap, Saturate
* @param[in] rounding_policy Rounding policy. Supported rounding modes: to zero, to nearest even.
- *
* @return a status
*/
static Status validate(const ITensorInfo *input1, const ITensorInfo *input2,
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLReduceMax.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains arm_compute::CLReduceMax class
+ */
+
#ifndef __ARM_COMPUTE_CLREDUCE_MAX_H__
#define __ARM_COMPUTE_CLREDUCE_MAX_H__
{
class ICLTensor;
-/** Basic function to execute TopK operation. This function calls the following OpenCL kernels:
- *
- * -# @ref CLTopKV2Kernel
+/**
+ * @brief Class to execute CLReduceMax operation
*/
class CLReduceMax : public IFunction
{
public:
- /** Constructor */
+ /**
+ * @brief Construct a new CLReduceMax object
+ */
CLReduceMax();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLReduceMax(const CLReduceMax &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLReduceMax &operator=(const CLReduceMax &) = delete;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Construct a new CLReduceMax object by using move constructor
+ * @param[in] CLReduceMax object to move
+ */
CLReduceMax(CLReduceMax &&) = default;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Assign a CLReduceMax object.
+ * @param[in] CLReduceMax object to assign. This object will be moved.
+ */
CLReduceMax &operator=(CLReduceMax &&) = default;
- /** Initialise the kernel's inputs and outputs.
- *
- * @note When locations of min and max occurrences are requested, the reported number of locations
- * is limited to the given array size.
- *
- * @param[in] input Input tensor. Data types supported: QASYMM8/S32/F32
- * @param[in] reduce_axis Axises to reduce.
- * @param[out] output Output tensor. Data types supported: Same as @p input.
+
+ /**
+ * @brief Initialise the kernel's inputs and outputs.
+ * @param[in] input Input tensor
+ * @param[in] reduce_axis Axes to reduce
+ * @param[out] output The result of ReduceMax operation
+ * @return N/A
*/
void configure(ICLTensor *input, std::vector<uint32_t> reduce_axis, ICLTensor *output);
- /** Static function to check if given info will lead to a valid configuration of @ref
- * CLPixelWiseDivision
- *
- * @param[in] input Input tensor info. Data types supported: QASYMM8/S32/F32
- * @param[in] reduce_axis Axises to reduce.
- * @param[out] output Output tensor info. Data types supported: Same as @p input.
- *
+
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration
+ * @param[in] input Input tensor
+ * @param[in] reduce_axis Axes to reduce
+ * @param[out] output The result of ReduceMax operation
* @return a status
*/
static Status validate(const ITensorInfo *input, const std::vector<uint32_t> &reduce_axis,
const ITensorInfo *output);
- // Inherited methods overridden:
+ /**
+ * @brief Run the kernels contained in the function
+ * This operation works on CPU or GPU depending on the value of REDUCE_MAX_RUN_ON_CPU macro
+ * in CLReduceMax.cpp.
+ * If REDUCE_MAX_RUN_ON_CPU == 1, CPU runs this operation.
+ * Otherwise GPU runs this operation.
+ * @return N/A
+ */
void run() override;
private:
* limitations under the License.
*/
+/**
+ * @file CLReductionMean.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains arm_compute::CLReductionMean class
+ */
+
#ifndef __ARM_COMPUTE_CLREDUCTIONMEAN_H__
#define __ARM_COMPUTE_CLREDUCTIONMEAN_H__
{
class ICLTensor;
-/** Perform reduction operation.
+/**
+ * @brief Class to perform ReductionMean operation
*/
class CLReductionMean : public IFunction
{
public:
- /** Default Constructor.
+ /**
+ * @brief Construct a new ReductionMean object
*/
CLReductionMean();
- /** Set the input and output tensors.
- *
- * @param[in] input Source tensor. Data types supported: F32. Data layouts supported: NCHW.
- * @param[out] output Destination tensor. Data types and data layouts supported: Same as @p input.
- * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1
+ /**
+ * @brief Set the input and output tensors.
+ * @param[in] input Source tensor. Data types supported: F32. Data layouts supported: NCHW
+ * @param[out] output Destination tensor. Data types and data layouts supported: Same as
+ * @p input.
+ * @param[in] axis Axis along which to reduce. Supported reduction axis : 0, 1
+ * @return N/A
*/
void configure(ICLTensor *input, ICLTensor *output, std::vector<uint32_t> axis);
- /** Static function to check if given info will lead to a valid configuration of @ref
+ /**
+ * @brief Static function to check if given info will lead to a valid configuration of @ref
* CLReductionMean.
- *
- * @param[in] input Source tensor info. Data types supported: F32. Data layouts supported: NCHW.
- * @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p
- * input.
- * @param[in] axis Axis along which to reduce. Supported reduction axis : 0,1
- *
+ * @param[in] input Source tensor info. Data types supported: F32. Data layouts supported: NCHW
+ * @param[in] output Destination tensor info. Data types and data layouts supported: Same as @p
+ * input.
+ * @param[in] axis Axis along which to reduce. Supported reduction axis : 0, 1
* @return a status
*/
static Status validate(const ITensorInfo *input, const ITensorInfo *output,
std::vector<uint32_t> axis);
- // Inherited methods overridden:
+ /**
+ * @brief Run the OpenCL kernel for this operation
+ * @return N/A
+ */
void run() override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLStridedSlice.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains arm_compute::CLStridedSlice and arm_compute::CLStridedSliceCPU class
+ */
+
#ifndef __ARM_COMPUTE_CLSTRIDEDSLICE_H__
#define __ARM_COMPUTE_CLSTRIDEDSLICE_H__
{
class ICLTensor;
-/** Basic function to run @ref CLStridedSliceKernel */
+/**
+ * @brief Class to run @ref CLStridedSliceKernel
+ */
class CLStridedSlice : public ICLSimpleFunction
{
public:
- /** Initialise the kernel's inputs and outputs
- *
- * @param[in] input First tensor input. Data type supported:
- * U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/F16/F32
- * @param[out] output Output tensor. Data type supported: Same as @p input
+ /**
+ * @brief Initialise the kernel's inputs and outputs
+ * @param[in] input Tensor input. Data type supported:
+ * U8/S8/QS8/QASYMM8/U16/S16/QS16/U32/S32/F16/F32
+ * @param[out] output Output tensor. Data type supported: Same as @p input
+ * @param[in] beginData 'begin' vector of strided slice operation
+ * @param[in] endData 'end' vector of strided slice operation
+ * @param[in] stridesData 'strides' vector of strided slice operation
+ * @param[in] beginMask If the ith bit is set, begin[i] is ignored
+ * @param[in] endMask If the ith bit is set, end[i] is ignored
+ * @param[in] shrinkAxisMask If the ith bit is set, the ith specification shrinks the
+ * dimensionality by 1, taking on the value at index begin[i]
+ * @return N/A
*/
void configure(const ICLTensor *input, ICLTensor *output, ICLTensor *beginData,
ICLTensor *endData, ICLTensor *stridesData, int32_t beginMask, int32_t endMask,
int32_t shrinkAxisMask);
};
+/**
+ * @brief Class to run StridedSlice operation on CPU
+ */
class CLStridedSliceCPU : public IFunction
{
public:
- /** Initialise inputs and outputs
- *
- * @param[in] input First tensor input.
- * @param[out] output Output tensor.
+ /**
+ * @brief Initialise the kernel's inputs and outputs
+ * @param[in] input Tensor input
+ * @param[out] output Output tensor. Data type supported: Same as @p input
+ * @param[in] beginData 'begin' vector of strided slice operation
+ * @param[in] endData 'end' vector of strided slice operation
+ * @param[in] stridesData 'strides' vector of strided slice operation
+ * @param[in] beginMask If the ith bit is set, begin[i] is ignored
+ * @param[in] endMask If the ith bit is set, end[i] is ignored
+ * @param[in] shrinkAxisMask If the ith bit is set, the ith specification shrinks the
+ * dimensionality by 1, taking on the value at index begin[i]
+ * @return N/A
*/
void configure(ICLTensor *input, ICLTensor *output, ICLTensor *beginData, ICLTensor *endData,
ICLTensor *stridesData, int32_t beginMask, int32_t endMask,
int32_t shrinkAxisMask);
+ /**
+ * @brief Run StridedSlice operation on CPU
+ * @return N/A
+ */
void run() override;
private:
* See the License for the specific language governing permissions and
* limitations under the License.
*/
+
+/**
+ * @file CLTopKV2.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file contains arm_compute::CLTopKV2 class
+ */
#ifndef __ARM_COMPUTE_CLTOPK_V2_H__
#define __ARM_COMPUTE_CLTOPK_V2_H__
{
class ICLTensor;
-/** Basic function to execute TopK operation. This function calls the following OpenCL kernels:
- *
- * -# @ref CLTopKV2Kernel
+/**
+ * @brief Class to execute TopKV2 operation.
*/
class CLTopKV2 : public IFunction
{
public:
- /** Constructor */
+ /**
+ * @brief Construct a new CLTopKV2 object
+ */
CLTopKV2();
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLTopKV2(const CLTopKV2 &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ */
CLTopKV2 &operator=(const CLTopKV2 &) = delete;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Construct a new CLTopKV2 object by using move constructor
+ * @param[in] CLTopKV2 object to move
+ */
CLTopKV2(CLTopKV2 &&) = default;
- /** Allow instances of this class to be moved */
+
+ /**
+ * @brief Assign a CLTopKV2 object.
+ * @param[in] CLTopKV2 object to assign. This object will be moved.
+ */
CLTopKV2 &operator=(CLTopKV2 &&) = default;
- /** Initialise the kernel's inputs and outputs.
- *
- * @note When locations of min and max occurrences are requested, the reported number of locations
- * is limited to the given array size.
- *
+
+ /**
+ * @brief Initialise the kernel's inputs and outputs.
* @param[in] input Input image. Data types supported: U8/S16/F32.
* @param[in] k The value of `k`.
* @param[out] values Top k values. Data types supported: S32 if input type is U8/S16, F32 if
* input type is F32.
- * @param[out] indices indices related to top k values. Data types supported: S32 if input type
+ * @param[out] indices Indices related to top k values. Data types supported: S32 if input type
* is U8/S16, F32 if input type is F32.
+ * @return N/A
*/
void configure(ICLTensor *input, int k, ICLTensor *values, ICLTensor *indices,
int total_bits = 32, int bits = 4);
- // Inherited methods overridden:
+ /**
+ * @brief Run the kernels contained in the function
+ * Depending on the value of the following environment variables it works differently:
+ * - If the value of environment variable "ACL_TOPKV2" == "GPU_SINGLE",
+ * quick sort on GPU is used.
+ * - If the value of environment variable "ACL_TOPKV2" == "GPU",
+ * radix sort on GPU is used.
+ * - For other value, TopKV2 runs on CPU
+ * @return N/A
+ */
void run() override;
private:
* limitations under the License.
*/
+/**
+ * @file topk_v2.h
+ * @brief This file contains TopK method and TopContainer class for TopK operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __NNFW_RT_OPTIMIZED_OPS_TOPK_V2_H__
#define __NNFW_RT_OPTIMIZED_OPS_TOPK_V2_H__
{
namespace optimized_ops
{
-// The follwing codes are impemented and modified while referring to TFLite topk_v2.cc file.
-// TopK_v2 of NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, TENSOR_INT32 other than
-// TFLite.
-//(TFLite additionaly supports kTfLiteInt64.)
-
-// The class that collects top indexes of k values. Based on template
-// tensorflow::gtl::TopN<> but, for optimization,
-// it re-uses the same container.
+/**
+ * @brief class to define TopK operation
+ * @note The following code is implemented and modified while referring to the TFLite topk_v2.cc file.
+ * TopK_v2 of NN Runtime supports TENSOR_FLOAT32, TENSOR_QUANT8_ASYMM, TENSOR_INT32 other than
+ * TFLite.
+ * (TFLite additionally supports kTfLiteInt64.)
+ *
+ * The class that collects top indexes of k values. Based on template
+ * tensorflow::gtl::TopN<> but, for optimization,
+ * it re-uses the same container.
+ */
template <typename T> class TopContainer
{
public:
+ /**
+ * @brief Prevent default construction of this class
+ */
TopContainer() = delete;
+ /**
+ * @brief Constructor with params
+ * @param [in] k The top k predictions
+ * @param [in] row_size Size of row in data
+ */
TopContainer(int32 k, int32 row_size) : k_(k), container_(), values_(nullptr)
{
container_.reserve(std::min(k, row_size) + 1);
}
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ * @param [in] topContainer To copy
+ */
TopContainer(const TopContainer &) = delete;
- /** Prevent instances of this class from being copied (As this class contains pointers) */
+ /**
+ * @brief Prevent instances of this class from being copied (As this class contains pointers)
+ * @param [in] topContainer To copy
+ * @return Reference of TopContainer
+ */
TopContainer &operator=(const TopContainer &) = delete;
+ /**
+ * @brief Start collecting
+ * @param [in] values To set as values
+ * @return N/A
+ */
void start_collecting(const T *values)
{
values_ = values;
container_.clear();
}
+ /**
+ * @brief Push a value to be compared for topk
+ * @param [in] a A value to compare
+ * @return N/A
+ */
void push(int32 a)
{
auto comparator = [this](int32 a, int32 b) { return compare_fun(a, b); };
}
}
+ /**
+ * @brief Get sorted result from pushed values
+ * @return Reference of vector with sorted values
+ */
const std::vector<int32> &sorted_result()
{
auto comparator = [this](int32 a, int32 b) { return compare_fun(a, b); };
}
};
+/**
+ * @brief Operates TopK operation with params
+ * @param [in] row_size Size of row in data
+ * @param [in] num_rows The number of rows in data
+ * @param [in] data To be operated in
+ * @param [in] k The top k predictions
+ * @param [out] output_indexes Indexes of targets in the top k predictions
+ * @param [out] output_values Values of targets in the top k predictions
+ * @return N/A
+ */
template <typename T>
void TopK(int32 row_size, int32 num_rows, const T *data, int32 k, int32 *output_indexes,
T *output_values)
* limitations under the License.
*/
+/**
+ * @file compilation.cc
+ * @brief This file contains ANeuralNetworksCompilation APIs and related classes
+ * @ingroup COM_AI_RUNTIME
+ */
+
#include <NeuralNetworks.h>
// For CLKernelLibraryEx initialization
};
}
+/**
+ * @brief Structure to provide interface methods of compilation plan builder
+ */
struct IPlanBuilder
{
+ /**
+ * @brief Destruct IPlanBuilder object using default destructor
+ */
virtual ~IPlanBuilder() = default;
+ /**
+ * @brief Add TensorInfo with Shape Constraints
+ * @param [in] ind Index of operand
+ * @param [in] info TensorInfo value to set to index of operand
+ * @return N/A
+ */
virtual void addShapeConstr(const ::internal::tflite::operand::Index &ind,
const ::arm_compute::TensorInfo &info) = 0;
+ /**
+ * @brief Add Subsumption constraints
+ * @param [in] ind Index of operand
+ * @param [in] base Index of base operand of Subsumption
+ * @param [in] offset Offset of Subsumption
+ * @param [in] shape Shape of Subsumption
+ * @param [in] extend_parent extend_parent value of Subsumption
+ * @return N/A
+ */
virtual void addSubsumptionConstr(const ::internal::tflite::operand::Index &ind,
const ::internal::tflite::operand::Index &base,
const ::arm_compute::Coordinates &offset,
const ::arm_compute::TensorShape &shape,
bool extend_parent = false) = 0;
+ /**
+ * @brief Add Initializer lambda with ITensor param
+ * @param [in] ind Index of operand
+ * @param [in] initializer Initializer to add
+ * @return N/A
+ */
virtual void addInitializer(const ::internal::tflite::operand::Index &ind,
const Initializer &initializer) = 0;
+ /**
+ * @brief Add Stage lambda with IAllocationContext and IExecutionBuilder params
+ * @param [in] s Stage to add
+ * @return N/A
+ */
virtual void addStage(const Stage &) = 0;
};
::internal::arm_compute::Plan &_plan;
};
+/**
+ * @brief Class to provide methods of compilation plan builder
+ */
class PlanBuilder final : public IPlanBuilder
{
public:
+ /**
+ * @brief Construct a new PlanBuilder object with Plan
+ * @param [in] plan The Plan object
+ */
PlanBuilder(::internal::arm_compute::Plan &plan) : _plan{plan}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Add TensorInfo with Shape Constraints
+ * @param [in] ind Index of operand
+ * @param [in] info TensorInfo value to set to index of operand
+ * @return N/A
+ */
void addShapeConstr(const ::internal::tflite::operand::Index &ind,
const ::arm_compute::TensorInfo &info) override;
public:
+ /**
+ * @brief Add Subsumption constraints
+ * @param [in] ind Index of operand
+ * @param [in] base Index of base operand of Subsumption
+ * @param [in] offset Offset of Subsumption
+ * @param [in] shape Shape of Subsumption
+ * @param [in] extend_parent extend_parent value of Subsumption
+ * @return N/A
+ */
void addSubsumptionConstr(const ::internal::tflite::operand::Index &ind,
const ::internal::tflite::operand::Index &base,
const ::arm_compute::Coordinates &offset,
const ::arm_compute::TensorShape &shape, bool extend_parent) override;
public:
+ /**
+ * @brief Add Initializer lambda with ITensor param
+ * @param [in] ind Index of operand
+ * @param [in] initializer Initializer to add
+ * @return N/A
+ */
void addInitializer(const ::internal::tflite::operand::Index &ind,
const Initializer &initializer) override;
public:
+ /**
+ * @brief Add Stage lambda with IAllocationContext and IExecutionBuilder params
+ * @param [in] stage Stage to add
+ * @return N/A
+ */
void addStage(const Stage &stage) override;
public:
+ /**
+ * @brief Finalize (build) the Plan
+ * @return N/A
+ */
void finalize(void) const;
private:
* limitations under the License.
*/
+/**
+ * @file compilation.h
+ * @brief This file defines ANeuralNetworksCompilation class for handling Compilation NNAPI
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __COMPILATION_H__
#define __COMPILATION_H__
#include "internal/Model.h"
#include "internal/arm_compute.h"
+/**
+ * @brief struct to define Compilation of NNAPI
+ */
struct ANeuralNetworksCompilation
{
public:
+ /**
+ * @brief Construct with params
+ * @param [in] model Pointer of internal::tflite::Model to set internal::arm_compute::Plan
+ */
ANeuralNetworksCompilation(const std::shared_ptr<const internal::tflite::Model> &model)
: _plan{new internal::arm_compute::Plan{model}}
{
}
public:
+ /**
+ * @brief Get reference of internal::arm_compute::Plan
+ * @return Reference of internal::arm_compute::Plan
+ */
internal::arm_compute::Plan &plan(void) { return *_plan; }
public:
+ /**
+ * @brief Publish internal Plan to param
+ * @param [out] plan Pointer of internal::arm_compute::Plan to be set
+ * @return N/A
+ */
void publish(std::shared_ptr<const internal::arm_compute::Plan> &plan) { plan = _plan; }
+ /**
+ * @brief Get @c true if ANeuralNetworksCompilation_finish has been called, otherwise @c false
+ * @return @c true if ANeuralNetworksCompilation_finish has been called, otherwise @c false
+ */
bool isFinished(void) { return _isFinished; }
+ /**
+ * @brief Mark compilation process finished
+ * @return N/A
+ */
void markAsFinished() { _isFinished = true; }
private:
* limitations under the License.
*/
+/**
+ * @file event.h
+ * @brief This file defines ANeuralNetworksEvent struct for handling Event NNAPI
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __EVENT_H__
#define __EVENT_H__
+/**
+ * @brief struct to define Event of NNAPI
+ */
struct ANeuralNetworksEvent
{
};
* limitations under the License.
*/
+/**
+ * @file execution.h
+ * @brief This file contains ANeuralNetworksExecution class for handling Execution NNAPI such as
+ * ANeuralNetworksExecution_create, ANeuralNetworksExecution_setInput
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __EXECUTION_H__
#define __EXECUTION_H__
#include "internal/Sink.h"
#include "internal/Source.h"
+/**
+ * @brief struct to express Execution of NNAPI
+ */
struct ANeuralNetworksExecution
{
public:
+ /**
+ * @brief Construct with params
+ * @param [in] plan Pointer to get internal::arm_compute::Plan
+ */
ANeuralNetworksExecution(const std::shared_ptr<const internal::arm_compute::Plan> &plan)
: _plan{plan}
{
}
public:
+ /**
+ * @brief Get reference of internal::arm_compute::Plan
+ * @return Const reference of internal::arm_compute::Plan
+ */
const internal::arm_compute::Plan &plan(void) const { return *_plan; }
private:
std::shared_ptr<const internal::arm_compute::Plan> _plan;
public:
+ /**
+ * @brief Set the nth source with param
+ * @param [in] n Index of the nth source
+ * @param [in] source Pointer to set the nth source from
+ * @return N/A
+ */
// TODO Use InputIndex instead of int
void source(int n, std::unique_ptr<Source> &&source) { _sources.at(n) = std::move(source); }
+ /**
+ * @brief Set the nth source with param
+ * @param [in] n Index of the nth source
+ * @param [in] args Arguments to set the nth source from
+ * @return N/A
+ */
template <typename T, typename... Args> void source(int n, Args &&... args)
{
source(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
}
public:
+ /**
+ * @brief Get the nth source
+ * @param [in] n Index of the nth source
+ * @return Const reference of Source
+ */
const Source &source(int n) const { return *(_sources.at(n)); }
public:
+ /**
+ * @brief Set the nth sink with param
+ * @param [in] n Index of the nth sink
+ * @param [in] sink Pointer to set the nth sink from
+ * @return N/A
+ */
// TODO Use OutputIndex instead of int
void sink(int n, std::unique_ptr<Sink> &&sink) { _sinks.at(n) = std::move(sink); }
+ /**
+ * @brief Set the nth sink with param
+ * @param [in] n Index of the nth sink
+ * @param [in] args Arguments to set the nth sink from
+ * @return N/A
+ */
template <typename T, typename... Args> void sink(int n, Args &&... args)
{
sink(n, std::unique_ptr<T>{new T{std::forward<Args>(args)...}});
}
public:
+ /**
+ * @brief Get the nth sink
+ * @param [in] n Index of the nth sink
+ * @return Const reference of Sink
+ */
const Sink &sink(int n) const { return *(_sinks.at(n)); }
private:
* limitations under the License.
*/
+/**
+ * @file FeatureSink.h
+ * @brief This file contains FeatureSink class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_FEATURE_SINK_H__
#define __INTERNAL_FEATURE_SINK_H__
#include <util/feature/Shape.h>
#include "util/feature/IndexIterator.h"
-//
-// FeatureSink
-//
+/**
+ * @brief Class to store Feature(4D) output data.
+ * This is for pulling data from another tensor into the internal tensor.
+ * @tparam T Type of the data elements
+ */
template <typename T> class FeatureSink final : public Sink
{
public:
+ /**
+ * @brief Construct a FeatureSink object
+ *
+ * @param[in] shape 4D tensor dimensions for this feature
+ * @param[in] base Base pointer of the actual data
+ * @param[in] size Size of the data
+ */
FeatureSink(const nnfw::util::feature::Shape &shape, T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
}
public:
+ /**
+ * @brief Pull the data into the internal structure
+ * @param[in] tensor The tensor which contains source data
+ * @return N/A
+ */
void pull(::arm_compute::ITensor &tensor) const override
{
const ::internal::arm_compute::feature::View<T> from{&tensor};
* limitations under the License.
*/
+/**
+ * @file FeatureSource.h
+ * @brief This file contains FeatureSource class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_FEATURE_SOURCE_H__
#define __INTERNAL_FEATURE_SOURCE_H__
#include "internal/nnapi/feature/Reader.h"
#include "internal/arm_compute/feature/View.h"
+/**
+ * @brief Class to store feature(4D) input data.
+ * This is for pushing out the data to another tensor.
+ * @tparam T Type of the data elements
+ */
template <typename T> class FeatureSource final : public Source
{
public:
+ /**
+ * @brief Construct a FeatureSource object
+ *
+ * @param[in] shape 4D tensor dimensions for this feature
+ * @param[in] base Base pointer of the actual data
+ * @param[in] size Size of the data
+ */
FeatureSource(const nnfw::util::feature::Shape &shape, const T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
}
public:
+ /**
+ * @brief Push the data out to the another tensor
+ * @param[out] tensor The tensor where output data will be stored
+ * @return N/A
+ */
void push(::arm_compute::ITensor &tensor) const override
{
const ::internal::nnapi::feature::Reader<T> from{_shape, _base, _size};
* limitations under the License.
*/
+/**
+ * @file IExecutionBuilder.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines interface of ExecutionBuilder
+ */
#ifndef __INTERNAL_IEXECUTION_BUILDER_H__
#define __INTERNAL_IEXECUTION_BUILDER_H__
#include <memory>
#include <string>
+/**
+ * @brief Struct to define interface of ExecutionBuilder
+ */
struct IExecutionBuilder
{
+ /**
+ * @brief Destroy the IExecutionBuilder object
+ */
virtual ~IExecutionBuilder() = default;
+ /**
+ * @brief Append function to execute
+ * @param[in] name Name of function
+ * @param[in] f Function to append
+ * @return N/A
+ */
virtual void append(const std::string &name, std::unique_ptr<::arm_compute::IFunction> &&f) = 0;
};
* limitations under the License.
*/
+/**
+ * @file MatrixSink.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines MatrixSink class
+ */
#ifndef __INTERNAL_MATRIX_SINK_H__
#define __INTERNAL_MATRIX_SINK_H__
#include <cstring>
#include <cassert>
+/**
+ * @brief Class to get matrix data from arm compute tensor
+ */
template <typename T> class MatrixSink final : public Sink
{
public:
+ /**
+ * @brief Construct a new Matrix Sink object
+ * @param[in] H Height of matrix
+ * @param[in] W Width of matrix
+ * @param[in] base Pointer to get data
+ * @param[in] size Size of matrix
+ */
MatrixSink(const int32_t H, const int32_t W, T *base, const size_t size)
: _height{H}, _width{W}, _base{base}
{
}
public:
+ /**
+ * @brief Get matrix data from arm compute tensor to base
+ * @param[in] tensor Tensor object of arm compute to get data
+ * @return N/A
+ */
void pull(::arm_compute::ITensor &tensor) const override
{
assert(tensor.info()->dimension(0) == _width);
* limitations under the License.
*/
+/**
+ * @file MatrixSource.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines MatrixSource class
+ */
#ifndef __INTERNAL_MATRIX_SOURCE_H__
#define __INTERNAL_MATRIX_SOURCE_H__
#include "internal/Source.h"
+/**
+ * @brief Class to push matrix data to arm compute tensor
+ */
template <typename T> class MatrixSource final : public Source
{
public:
+ /**
+ * @brief Construct a new MatrixSource object
+ * @param[in] shape Shape of matrix
+ * @param[in] base Pointer of matrix data to push
+ * @param[in] size Size of matrix
+ */
MatrixSource(const nnfw::util::matrix::Shape &shape, const T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
}
public:
+ /**
+ * @brief Push matrix data to arm compute tensor
+ * @param[out] tensor Tensor object of arm compute to push matrix data
+ * @return N/A
+ */
void push(::arm_compute::ITensor &tensor) const override
{
using ::arm_compute::Window;
* limitations under the License.
*/
+/**
+ * @file Model.h
+ * @brief This file contains classes for handle internal Model object
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_MODEL_H__
#define __INTERNAL_MODEL_H__
namespace operand
{
+/**
+ * @brief Class to express index of operand.
+ */
class Index
{
public:
+ /**
+ * @brief Construct a new Index object for operand with param.
+ * @param [in] value The number of index
+ */
explicit Index(int value) : _value{value}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get index value as int
+ * @return Index value as int
+ */
int asInt(void) const { return _value; }
private:
namespace operand
{
+/**
+ * @brief Class to express shape of operand.
+ */
struct Shape : public nnfw::util::tensor::Shape
{
public:
+ /**
+ * @brief Construct a new Shape object for operand with param.
+ * @param [in] rank The rank value of shape
+ */
Shape(uint32_t rank);
public:
+ /**
+ * @brief Get dimension value of tensor as vector
+ * @return Dimension value(int32_t) of tensor as vector
+ */
int32_t asVector(void) const;
+ /**
+ * @brief Get dimension values of tensor as feature::Shape
+ * @return Dimension values of tensor as feature::Shape
+ */
nnfw::util::feature::Shape asFeature(void) const;
+ /**
+ * @brief Get dimension values of tensor as matrix::Shape
+ * @return Dimension values of tensor as matrix::Shape
+ */
nnfw::util::matrix::Shape asMatrix(void) const;
+ /**
+ * @brief Get dimension values of tensor as kernel::Shape
+ * @return Dimension values of tensor as kernel::Shape
+ */
nnfw::util::kernel::Shape asKernel(void) const;
+ /**
+ * @brief Get dimension values of tensor::Shape
+ * @return Dimension values of tensor::Shape
+ */
nnfw::util::tensor::Shape asTensor(void) const;
public:
+ /**
+ * @brief Extend rank of Shape object for operand with param.
+ * @param [in] to_rank The rank value to be extended to
+ * @return N/A
+ */
void extendRank(size_t);
};
namespace operand
{
+/**
+ * @brief Class to have data of operand.
+ */
struct Data
{
+ /**
+ * @brief Destruct this object
+ */
virtual ~Data() = default;
+ /**
+ * @brief Get size of data
+ * @return size of data
+ */
virtual size_t size(void) const = 0;
+ /**
+ * @brief Get the base address of data
+ * @return the base address of data
+ */
virtual const uint8_t *base(void) const = 0;
};
+/**
+ * @brief Class to have cached data of operand.
+ */
class CachedData final : public Data
{
public:
+ /**
+ * @brief Construct a new CachedData object for operand with param.
+ * @param [in] base the base address of data
+ * @param [in] size the size of data
+ */
CachedData(const uint8_t *base, size_t size) : _base{new uint8_t[size]}, _size{size}
{
std::copy(base, base + size, _base);
}
public:
+ /**
+ * @brief Destruct this object
+ */
~CachedData() { delete[] _base; }
public:
+ /**
+ * @brief Get size of data
+ * @return size of data
+ */
size_t size(void) const override { return _size; }
+ /**
+ * @brief Get the base address of data
+ * @return the base address of data
+ */
const uint8_t *base(void) const override { return _base; }
private:
size_t _size;
};
+/**
+ * @brief Class to have external data of operand.
+ */
class ExternalData final : public Data
{
public:
+ /**
+ * @brief Construct a new ExternalData object for operand with param.
+ * @param [in] base the base address of data
+ * @param [in] size the size of data
+ */
ExternalData(const uint8_t *base, size_t size) : _base{base}, _size{size}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get size of data
+ * @return size of data
+ */
size_t size(void) const override { return _size; }
+ /**
+ * @brief Get the base address of data
+ * @return the base address of data
+ */
const uint8_t *base(void) const override { return _base; }
private:
namespace operand
{
+/**
+ * @brief Class to express operand as object.
+ */
class Object
{
public:
+ /**
+ * @brief Construct a new Object object for operand with param.
+ * @param [in] shape shape of operand
+ * @param [in] type type of operand
+ * @param [in] scale scale of operand
+ * @param [in] zeroPoint zeroPoint of operand
+ */
explicit Object(const Shape &shape, const int32_t type, const float scale,
const int32_t zeroPoint)
: _shape{shape}, _type{type}, _scale{scale}, _zeroPoint{zeroPoint}
}
public:
+ /**
+ * @brief Get shape of operand
+ * @return Reference of shape of operand
+ */
const Shape &shape(void) const { return _shape; }
+ /**
+ * @brief Get type of operand
+ * @return type of operand
+ */
const int32_t type(void) const { return _type; }
+ /**
+ * @brief Get scale of operand
+ * @return scale of operand
+ */
const float scale(void) const { return _scale; }
+ /**
+ * @brief Get zeroPoint of operand
+ * @return zeroPoint of operand
+ */
const int32_t zeroPoint(void) const { return _zeroPoint; }
private:
void data(std::unique_ptr<Data> &&data) { _data = std::move(data); }
public:
+ /**
+ * @brief Get data of operand
+ * @return Reference of data of operand
+ */
const Data &data(void) const { return *_data; }
+ /**
+ * @brief Check if Object has data
+ * @return @c true if Object has data, otherwise @c false
+ */
bool hasData(void) const { return _data != nullptr; }
public:
+ /**
+ * @brief Set data of operand with param
+ * @param [in] args arguments of data to be set
+ * @return N/A
+ */
template <typename T, typename... Args> void data(Args &&... args)
{
data(std::unique_ptr<T>(new T{std::forward<Args>(args)...}));
}
public:
+ /**
+ * @brief Get value of data as scalar
+ * @return value of data as scalar
+ */
template <typename T> T asScalar(void) const
{
assert((_shape.rank() == 0) || ((_shape.rank() == 1) && (_shape.dim(0) == 1)));
}
public:
+ /**
+ * @brief Get value of data as ReorderBits
+ * @param [in] numOfBits The number of bits to be reordered to
+ * @return value of data as ReorderBits
+ */
template <typename T> T asReorderBits(size_t numOfBits) const
{
assert((_shape.rank() == 0) || ((_shape.rank() == 1) && (_shape.dim(0) == 1)));
namespace operand
{
+/**
+ * @brief Class to have object instances in a kind of set
+ */
class Set
{
public:
+ /**
+ * @brief Iterate objects with fn
+ * @param [in] fn function to be iterated
+ * @return N/A
+ */
void iterate(const std::function<void(const Index &)> &fn)
{
for (uint32_t n = 0; n < _objects.size(); ++n)
}
public:
+ /**
+ * @brief Append Object for operand with param
+ * @param [in] shape shape of operand
+ * @param [in] type type of operand
+ * @param [in] scale scale of operand
+ * @param [in] zeroPoint zeroPoint of operand
+ * @return Value of Index which has been appended to
+ */
Index append(const Shape &, int32_t type, float scale, int32_t zeroPoint);
public:
+ /**
+ * @brief Get Object at Index
+ * @param [in] index Index to be at
+ * @return Const reference of Object
+ */
const Object &at(const Index &) const;
+ /**
+ * @brief Get Object at Index
+ * @param [in] index Index to be at
+ * @return Reference of Object
+ */
Object &at(const Index &);
+ /**
+ * @brief Get size of operands in Set
+ * @return Value of size
+ */
size_t size(void) const { return _objects.size(); }
bool exist(const Index &) const;
namespace op
{
+/**
+ * @brief Class to have sequence operators.
+ */
class Sequence
{
public:
+ /**
+ * @brief Construct a new Sequence object for operator as default
+ */
Sequence() = default;
public:
+ /**
+ * @brief Get size of operators in Sequence
+ * @return Value of size
+ */
uint32_t size(void) const { return _ops.size(); }
public:
+ /**
+ * @brief Get op::Node at Index
+ * @param [in] nth index to be at
+ * @return Reference of op::Node
+ */
op::Node &at(uint32_t nth) { return *(_ops.at(nth)); }
+ /**
+ * @brief Get op::Node at Index
+ * @param [in] nth index to be at
+ * @return Const reference of op::Node
+ */
const op::Node &at(uint32_t nth) const { return *(_ops.at(nth)); }
private:
}
public:
+ /**
+ * @brief Add op::Node with param
+ * @param [in] args arguments of op::Node to be set
+ * @return Reference of Sequence
+ */
template <typename T, typename... Args> Sequence &emplace_back(Args &&... args)
{
return emplace_back(std::unique_ptr<T>(new T{std::forward<Args>(args)...}));
namespace tflite
{
+/**
+ * @brief Class to have operand::Set as operands and op::Sequence as operators
+ */
class Model
{
public:
+ /**
+ * @brief Get operand::Set
+ * @return Reference of operand::Set
+ */
operand::Set &operands(void) { return _operands; }
+ /**
+ * @brief Get operand::Set
+ * @return Const reference of operand::Set
+ */
const operand::Set &operands(void) const { return _operands; }
public:
+ /**
+ * @brief Get op::Sequence
+ * @return Reference of op::Sequence
+ */
op::Sequence &operations(void) { return _operations; }
+ /**
+ * @brief Get op::Sequence
+ * @return Const reference of op::Sequence
+ */
const op::Sequence &operations(void) const { return _operations; }
private:
public:
// TODO Hide these fields
- std::vector<operand::Index> inputs;
- std::vector<operand::Index> outputs;
+ std::vector<operand::Index> inputs; /**< indexes of operand as input */
+ std::vector<operand::Index> outputs; /**< indexes of operand as output */
};
} // namespace tflite
* limitations under the License.
*/
+/**
+ * @file Sink.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Sink struct
+ */
#ifndef __INTERNAL_SINK_H__
#define __INTERNAL_SINK_H__
#include <arm_compute/core/ITensor.h>
+/**
+ * @brief Struct to get tensor data from arm compute tensor (abstract)
+ */
struct Sink
{
+ /**
+ * @brief Destroy the Sink object
+ */
virtual ~Sink() = default;
+ /**
+ * @brief Get tensor data from arm compute tensor
+ * @param[in] tensor Tensor object of arm compute to get data
+ * @return N/A
+ */
virtual void pull(::arm_compute::ITensor &tensor) const = 0;
};
* limitations under the License.
*/
+/**
+ * @file Sinks.h
+ * @brief This file contains TensorSink class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_SINKS_H__
#define __INTERNAL_SINKS_H__
#include "util/tensor/IndexIterator.h"
+/**
+ * @brief Class to store NN model output data for general-shaped tensors.
+ * This is for pulling data to internal tensor from other tensor.
+ * @tparam T Type of the data elements
+ */
template <typename T> class TensorSink final : public Sink
{
public:
+ /**
+ * @brief Construct a TensorSink object
+ *
+ * @param[in] shape general-shaped tensor dimensions
+ * @param[in] base Base pointer of the actual data
+ * @param[in] size Size of the data
+ */
TensorSink(const nnfw::util::tensor::Shape &shape, T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
}
public:
+ /**
+ * @brief Pull the data into the internal structure
+ * @param[in] tensor The tensor which contains source data
+ * @return N/A
+ */
void pull(::arm_compute::ITensor &tensor) const override
{
const ::internal::arm_compute::tensor::View<T> from{&tensor};
* limitations under the License.
*/
+/**
+ * @file Source.h
+ * @brief This file contains Source struct for pushing ITensor
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_SOURCE_H__
#define __INTERNAL_SOURCE_H__
#include <arm_compute/core/ITensor.h>
+/**
+ * @brief Struct to push inner source to ITensor.
+ */
struct Source
{
+ /**
+ * @brief Destructor as default
+ */
virtual ~Source() = default;
+ /**
+ * @brief Push inner source to ITensor
+ * @param [in] tensor ITensor to be pushed into
+ * @return N/A
+ */
virtual void push(::arm_compute::ITensor &tensor) const = 0;
};
* limitations under the License.
*/
+/**
+ * @file Swizzle.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines ARMComputeAxis class and utility functions to support mapping
+ * between arm compute axis and NNAPI axis
+ */
#ifndef __SWIZZLE_H__
#define __SWIZZLE_H__
+/**
+ * @brief Class to represent arm compute axis
+ */
class ARMComputeAxis
{
public:
+ /**
+ * @brief Construct a new ARMComputeAxis object
+ */
ARMComputeAxis() = default;
public:
+ /**
+ * @brief Construct a new ARMComputeAxis object
+ * @param[in] value Raw axis number
+ */
explicit ARMComputeAxis(uint32_t value) : _value{value}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get raw axis number
+ * @return Raw axis number
+ */
uint32_t value(void) const { return _value; }
private:
uint32_t _value;
};
-// Convert T/F Lite / NNAPI axis (based on ...NHWC) to ARMCompute axis (WHCN...)
+/**
+ * @brief Convert T/F Lite / NNAPI axis (based on ...NHWC) to arm compute axis (WHCN...)
+ * @param[in] rank Rank of shape
+ * @param[in] axis Axis to map
+ * @return ARMComputeAxis including arm compute axis info
+ */
inline ARMComputeAxis ToARMComputeAxis(uint32_t rank, uint32_t axis)
{
assert(rank > axis);
#include <cassert>
+/**
+ * @brief Convert bitmask info from NNAPI axis to arm compute axis
+ * @param[in] in Bitmask data
+ * @param[in] numOfBits Used bits (rank)
+ * @return Converted bitmask
+ */
template <typename T> inline T ReorderBits(T in, size_t numOfBits)
{
assert(numOfBits > 0);
* limitations under the License.
*/
+/**
+ * @file Tensor3DSink.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Tensor3DSink class
+ */
#ifndef __TENSOR3D_SINK_H__
#define __TENSOR3D_SINK_H__
#include <arm_compute/core/Window.h>
#include <arm_compute/core/Helpers.h>
+/**
+ * @brief Class to get tensor data from arm compute tensor
+ */
template <typename T> class Tensor3DSink final : public Sink
{
public:
+ /**
+ * @brief Construct a new Tensor3DSink object
+ * @param[in] shape Shape of tensor
+ * @param[in] base Pointer to get data
+ * @param[in] size Size of tensor
+ */
Tensor3DSink(const nnfw::util::tensor::Shape &shape, T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
}
public:
+ /**
+ * @brief Get tensor data from arm compute tensor to base
+ * @param[in] tensor Tensor object of arm compute to get data
+ * @return N/A
+ */
void pull(::arm_compute::ITensor &tensor) const override
{
using ::arm_compute::Window;
* limitations under the License.
*/
+/**
+ * @file Tensor3DSource.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Tensor3DSource class
+ */
#ifndef __TENSOR3D_SOURCE_H__
#define __TENSOR3D_SOURCE_H__
#include <arm_compute/core/Window.h>
#include <arm_compute/core/Helpers.h>
+/**
+ * @brief Class to push tensor data to arm compute tensor
+ */
template <typename T> class Tensor3DSource final : public Source
{
public:
+ /**
+ * @brief Construct a new Tensor3DSource object
+ * @param[in] shape Shape of tensor
+ * @param[in] base Pointer of tensor data to push
+ * @param[in] size Size of tensor
+ */
Tensor3DSource(const nnfw::util::tensor::Shape &shape, const T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
}
public:
+ /**
+ * @brief Push tensor data to arm compute tensor
+ * @param[out] tensor Tensor object of arm compute to push tensor data
+ * @return N/A
+ */
void push(::arm_compute::ITensor &tensor) const override
{
using ::arm_compute::Window;
* limitations under the License.
*/
+/**
+ * @file TensorSource.h
+ * @brief This file contains TensorSource class which is inherited from Source class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_TENSOR_SOURCE_H__
#define __INTERNAL_TENSOR_SOURCE_H__
#include "internal/arm_compute/tensor/View.h"
// NOTE TensorSource is much slower than specialized Source(s)
+/**
+ * @brief Class to define constructor and push function
+ */
template <typename T> class TensorSource final : public Source
{
public:
+ /**
+ * @brief Construct a new TensorSource object with params
+ * @param [in] shape Shape of tensor
+ * @param [in] base Base address
+ * @param [in] size Size of tensor
+ */
TensorSource(const nnfw::util::tensor::Shape &shape, const T *base, const size_t size)
: _shape{shape}, _base{base}, _size{size}
{
}
public:
+ /**
+ * @brief Function for pushing tensor
+ * @param [in] tensor Tensor to be pushed
+ * @return N/A
+ */
void push(::arm_compute::ITensor &tensor) const override
{
const ::internal::nnapi::tensor::Reader<T> from{_shape, _base, _size};
* limitations under the License.
*/
+/**
+ * @file VectorSink.h
+ * @brief This file contains VectorSink class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_VECTOR_SINK_H__
#define __INTERNAL_VECTOR_SINK_H__
#include <cassert>
-//
-// VectorSink
-//
+/**
+ * @brief Class to store vector(2D) output data.
+ * This is for pulling out the data to another tensor.
+ * @tparam T Type of the data elements
+ */
template <typename T> class VectorSink final : public Sink
{
public:
+ /**
+ * @brief Construct a VectorSink object
+ * @param[in] vlen Length of the vector
+ * @param[in] base Base pointer of the actual data
+ * @param[in] size Size of the data
+ */
VectorSink(const int32_t vlen, T *base, const size_t size) : _vlen{vlen}, _base{base}
{
assert(size >= _vlen * sizeof(T));
}
public:
+ /**
+ * @brief Pull the data into the internal structure
+ * @param[in] tensor The tensor which contains source data
+ * @return N/A
+ */
void pull(::arm_compute::ITensor &tensor) const override
{
for (int32_t n = 0; n < _vlen; ++n)
* limitations under the License.
*/
+/**
+ * @file VectorSource.h
+ * @brief This file contains VectorSource class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_VECTOR_SOURCE_H__
#define __INTERNAL_VECTOR_SOURCE_H__
#include "internal/Source.h"
+/**
+ * @brief Class to store vector(2D) input data.
+ * This is for pushing the data out to another tensor.
+ * @tparam T Type of the data elements
+ */
template <typename T> class VectorSource final : public Source
{
public:
+ /**
+ * @brief Construct a VectorSource object
+ * @param[in] vlen Length of the vector
+ * @param[in] base Base pointer of the actual data
+ * @param[in] size Size of the data
+ */
VectorSource(const int32_t vlen, const T *base, const size_t size) : _vlen{vlen}, _base{base}
{
assert(size >= _vlen * sizeof(T));
}
public:
+ /**
+ * @brief Push the data out to another tensor
+ * @param[out] tensor The tensor that output data will be stored in
+ * @return N/A
+ */
void push(::arm_compute::ITensor &tensor) const override
{
for (int32_t n = 0; n < _vlen; ++n)
* limitations under the License.
*/
+/**
+ * @file arm_compute.h
+ * @brief This file contains arm_compute library related classes
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_ARM_COMPUTE_H__
#define __INTERNAL_ARM_COMPUTE_H__
namespace operand
{
+/**
+ * @brief Class to access the tensor object
+ */
class Object
{
public:
}
public:
+ /**
+ * @brief Get the tensor pointer
+ * @return The tensor pointer
+ */
::arm_compute::ITensor *ptr(void) const { return _tensor.get(); }
private:
std::shared_ptr<::arm_compute::ITensor> _tensor;
public:
+ /**
+ * @brief Access the tensor object and run the given function
+ * @param[in] fn The actual behavior when accessing the tensor object
+ * @return N/A
+ */
void access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const;
};
namespace operand
{
+/**
+ * @brief Class to manage Object instances
+ */
class Context
{
public:
+ /**
+ * @brief Set index and tensor pair
+ * @param[in] ind The operand index
+ * @param[in] tensor The tensor object
+ * @return This object reference
+ */
Context &set(const ::internal::tflite::operand::Index &ind,
const std::shared_ptr<::arm_compute::ITensor> &tensor);
public:
+ /**
+ * @brief Check if the tensor for the given index exists
+ * @param[in] ind The operand Index
+ * @return @c true if the entry for ind exists, otherwise @c false
+ */
bool exist(const ::internal::tflite::operand::Index &ind) const
{
return _objects.find(ind.asInt()) != _objects.end();
}
public:
+ /**
+ * @brief Lookup the tensor with the given index
+ * @param[in] ind The index as the key
+ * @return The object const reference
+ */
const Object &at(const ::internal::tflite::operand::Index &ind) const
{
return _objects.at(ind.asInt());
}
+ /**
+ * @brief Lookup the tensor with the given index
+ * @param[in] ind The index as the key
+ * @return The object reference
+ */
Object &at(const ::internal::tflite::operand::Index &ind) { return _objects.at(ind.asInt()); }
private:
namespace op
{
+/**
+ * @brief Class to wrap IFunction
+ */
class Step
{
public:
+ /**
+ * @brief Construct a Step object
+ * @param[in] func The compiled code to be executed
+ */
Step(std::unique_ptr<::arm_compute::IFunction> &&func) : _func{std::move(func)}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Run _func
+ * @return N/A
+ */
void run(void) const { _func->run(); }
public:
+ /**
+ * @brief Get member @c _name
+ * @return The name as const reference
+ */
const std::string &name(void) const { return _name; }
+ /**
+ * @brief Get member @c _name
+ * @return The name as reference
+ */
std::string &name(void) { return _name; }
private:
std::unique_ptr<::arm_compute::IFunction> _func;
#ifdef TFLITE_PROFILING_ENABLED
public:
+ /**
+ * @brief Get member @c _op_index
+ * @return The operation index as value
+ */
int op_idx() const { return _op_idx; }
+ /**
+ * @brief Get member @c _op_index
+ * @return The operation index as reference
+ */
int &op_idx() { return _op_idx; }
private:
int _op_idx;
namespace op
{
+/**
+ * @brief Class managing compiled operation code Sequence
+ */
class Sequence
{
public:
+ /**
+ * @brief Get size of sequence
+ * @return Number of sequence steps
+ */
uint32_t size(void) const { return _functions.size(); }
public:
+ /**
+ * @brief Append a Function to the sequence
+ * @param[in] func Function to be appended
+ * @return This object reference
+ */
Sequence &append(std::unique_ptr<::arm_compute::IFunction> &&func)
{
_functions.emplace_back(std::move(func));
}
public:
+ /**
+ * @brief Get the step entry on the index @c n
+ * @param[in] n The index
+ * @return The step object as reference
+ */
Step &at(uint32_t n) { return _functions.at(n); }
+ /**
+ * @brief Get the step entry on the index @c n
+ * @param[in] n The index
+ * @return The step object as const reference
+ */
const Step &at(uint32_t n) const { return _functions.at(n); }
private:
namespace arm_compute
{
+/**
+ * @brief Class to manage compiled operation sequence
+ */
class Plan
{
public:
+ /**
+ * @brief Construct a Plan object
+ * @param[in] model Model that we want to compile
+ */
Plan(const std::shared_ptr<const ::internal::tflite::Model> &model) : _model(model)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get the model object
+ * @return The model object as const reference
+ */
const ::internal::tflite::Model &model(void) const { return *_model; }
public:
+ /**
+ * @brief Get operand context
+ * @return The operand context as reference
+ */
operand::Context &operands(void) { return _operands; }
+ /**
+ * @brief Get operand context
+ * @return The operand context as const reference
+ */
const operand::Context &operands(void) const { return _operands; }
public:
+ /**
+ * @brief Get operation sequence
+ * @return The operation sequence as reference
+ */
op::Sequence &operations(void) { return _ops; }
+ /**
+ * @brief Get operation sequence
+ * @return The operation sequence as const reference
+ */
const op::Sequence &operations(void) const { return _ops; }
private:
namespace arm_compute
{
-// check if this runtime runs on GPU or NEON
+/**
+ * @brief Check if this runtime runs on GPU or NEON
+ * @return @c true if GPU mode, otherwise @c false
+ */
bool isGpuMode();
#define CAST_CL(tensor) static_cast<::arm_compute::CLTensor *>(tensor)
* limitations under the License.
*/
+/**
+ * @file Cast.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines casting functions from internal object to arm compute object
+ */
#ifndef __ARM_COMPUTE_CAST_H__
#define __ARM_COMPUTE_CAST_H__
#include "internal/Model.h"
+/**
+ * @brief Generate arm compute coordinate object from rank
+ * @param[in] rank Rank number
+ * @return Coordinates object
+ */
::arm_compute::Coordinates getARMComputeAxises(uint32_t rank);
::arm_compute::Coordinates asARMComputeCoordinates(const ::arm_compute::Coordinates &runtime_coord,
const ::arm_compute::Coordinates &axises);
+/**
+ * @brief Cast from shape of internal to arm compute
+ * @param[in] shape Internal shape object
+ * @param[in] apply_dim_correction Flag to state whether apply dimension correction after setting
+ * one dimension in arm compute
+ * @return TensorShape object of arm compute
+ */
::arm_compute::TensorShape asTensorShape(const internal::tflite::operand::Shape &shape,
bool apply_dim_correction = true);
+/**
+ * @brief Cast from data type enum of NNAPI to arm compute
+ * @param[in] type NNAPI data type
+ * @return Data type of arm compute
+ */
::arm_compute::DataType asDataType(const int32_t type);
+/**
+ * @brief Cast from NNAPI activation type enum to activation object of arm compute
+ * @param[in] code NNAPI activation type
+ * @return ActivationLayerInfo object of arm compute
+ */
::arm_compute::ActivationLayerInfo asActivationInfo(FuseCode code);
+/**
+ * @brief Generate quantization info object of arm compute
+ * @param[in] scale Scale of quantization
+ * @param[in] offset Offset of quantization
+ * @return QuantizationInfo object of arm compute
+ */
::arm_compute::QuantizationInfo asQuantizationInfo(const float scale, const int32_t offset);
+/**
+ * @brief Cast from internal tensor info to tensor info object of arm compute
+ * @param[in] shape Tensor shape
+ * @param[in] type Tensor type
+ * @param[in] scale Scale of tensor quantization
+ * @param[in] zeroPoint Zeropoint of tensor quantization
+ * @return TensorInfo object of arm compute
+ */
::arm_compute::TensorInfo asTensorInfo(const ::arm_compute::TensorShape &shape, const int32_t type,
const float scale = 0.0f, const int32_t zeroPoint = 0);
+/**
+ * @brief Set value to arm compute tensor with casting
+ * @param[in] value Value to set
+ * @param[out] to Target tensor of arm compute
+ * @param[in] id Position of element
+ * @return N/A
+ */
template <typename FromT>
void copyCast(const FromT value, ::arm_compute::ITensor *to, const ::arm_compute::Coordinates &id)
{
* limitations under the License.
*/
+/**
+ * @file View.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::arm_compute::feature::View class
+ */
#ifndef __INTERNAL_ARM_COMPUTE_FEATURE_VIEW_H__
#define __INTERNAL_ARM_COMPUTE_FEATURE_VIEW_H__
namespace feature
{
+/**
+ * @brief Class to access feature's element
+ */
template <typename T> class View final : public nnfw::util::feature::Reader<T>
{
public:
+ /**
+ * @brief Construct a new View object
+ * @param[in] tensor Feature to support access
+ */
View(::arm_compute::ITensor *tensor) : _tensor{tensor}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get value of element in 3D feature using channel, row and column
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t ch, uint32_t row, uint32_t col) const override
{
const auto offset = feature_index_to_byte_offset(ch, row, col);
return *ptr;
}
+ /**
+ * @brief Get value of element in 4D feature using batch, channel, row and column
+ * @param[in] batch Batch index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
{
const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
}
public:
+ /**
+ * @brief Get reference of element in 3D feature using channel, row and column
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
T &at(uint32_t ch, uint32_t row, uint32_t col)
{
const auto offset = feature_index_to_byte_offset(ch, row, col);
return *ptr;
}
+ /**
+ * @brief Get reference of element in 4D feature using batch, channel, row and column
+ * @param[in] batch Batch index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
{
const auto offset = feature_index_to_byte_offset(batch, ch, row, col);
}
private:
+ /**
+ * @brief Get offset of element in 3D feature
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Offset of element
+ */
size_t feature_index_to_byte_offset(uint32_t ch, uint32_t row, uint32_t col) const
{
// ARM Compute uses CHW ordering
return _tensor->info()->offset_element_in_bytes(::arm_compute::Coordinates{col, row, ch});
}
+ /**
+ * @brief Get offset of element in 4D feature
+ * @param[in] batch Batch index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Offset of element
+ */
size_t feature_index_to_byte_offset(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const
{
// ARM Compute uses CHW ordering
* limitations under the License.
*/
+/**
+ * @file View.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::arm_compute::kernel::View class
+ */
#ifndef __INTERNAL_ARM_COMPUTE_KERNEL_VIEW_H__
#define __INTERNAL_ARM_COMPUTE_KERNEL_VIEW_H__
namespace kernel
{
+/**
+ * @brief Class to access kernel's element
+ */
template <typename T> class View final : public nnfw::util::kernel::Reader<T>
{
public:
+ /**
+ * @brief Construct a new View object
+ * @param[in] tensor Kernel to support access
+ */
View(::arm_compute::ITensor *tensor) : _tensor{tensor}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get value of element in kernel
+ * @param[in] nth Kernel index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const override
{
const auto offset = kernel_index_to_byte_offset(nth, ch, row, col);
}
public:
+ /**
+ * @brief Get reference of element in kernel
+ * @param[in] nth Kernel index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
T &at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col)
{
const auto offset = kernel_index_to_byte_offset(nth, ch, row, col);
}
private:
+ /**
+ * @brief Get offset of element in kernel
+ * @param[in] nth Kernel index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Offset of element
+ */
size_t kernel_index_to_byte_offset(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const
{
return _tensor->info()->offset_element_in_bytes(::arm_compute::Coordinates{col, row, ch, nth});
* limitations under the License.
*/
+/**
+ * @file View.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::arm_compute::matrix::View class
+ */
#ifndef __INTERNAL_ARM_COMPUTE_MATRIX_VIEW_H__
#define __INTERNAL_ARM_COMPUTE_MATRIX_VIEW_H__
namespace matrix
{
+/**
+ * @brief Class to access matrix's element
+ */
template <typename T> class View final : public nnfw::util::matrix::Reader<T>
{
public:
+ /**
+ * @brief Construct a new View object
+ * @param[in] tensor Matrix to support access
+ */
View(::arm_compute::ITensor *tensor) : _tensor{tensor}
{
// DO NOTHING
}
public:
+ /**
+ * @brief Get value of element in matrix
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t row, uint32_t col) const override
{
const auto offset = matrix_index_to_byte_offset(row, col);
}
public:
+ /**
+ * @brief Get reference of element in matrix
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
T &at(uint32_t row, uint32_t col)
{
const auto offset = matrix_index_to_byte_offset(row, col);
}
private:
+ /**
+ * @brief Get offset of element in matrix
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Offset of element
+ */
size_t matrix_index_to_byte_offset(uint32_t row, uint32_t col) const
{
return _tensor->info()->offset_element_in_bytes(::arm_compute::Coordinates{col, row});
* limitations under the License.
*/
+/**
+ * @file View.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::arm_compute::tensor::View class
+ */
#ifndef __INTERNAL_ARM_COMPUTE_TENSOR_VIEW_H__
#define __INTERNAL_ARM_COMPUTE_TENSOR_VIEW_H__
namespace tensor
{
+/**
+ * @brief Class to access tensor's element
+ */
template <typename T> class View
{
public:
+ /**
+ * @brief Construct a new View object
+ * @param[in] tensor Tensor to support access
+ */
View(::arm_compute::ITensor *tensor) : _tensor{tensor}
{
// DO NOTHING
}
private:
+ /**
+ * @brief Get offset of element in tensor
+ * @param[in] index Index of element
+ * @return Offset of element
+ */
uint32_t byte_offset_of(const nnfw::util::tensor::Index &index) const
{
// NOTE index.rank() >= _tensor->info()->num_dimensions() should hold here
}
public:
+ /**
+ * @brief Get value of element in tensor
+ * @param[in] index Index of element
+ * @return Value of element
+ */
T at(const nnfw::util::tensor::Index &index) const
{
const auto offset = byte_offset_of(index);
return *ptr;
}
+ /**
+ * @brief Get reference of element in tensor
+ * @param[in] index Index of element
+ * @return Reference of element
+ */
T &at(const nnfw::util::tensor::Index &index)
{
const auto offset = byte_offset_of(index);
* limitations under the License.
*/
+/**
+ * @file FeatureLoggingLayer.h
+ * @brief This file contains FeatureLoggingLayer class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __FEATURE_LOGGING_LAYER_H__
#define __FEATURE_LOGGING_LAYER_H__
#include "internal/arm_compute.h"
+/**
+ * @brief Class to run FeatureLogging Layer
+ */
class FeatureLoggingLayer : public ::arm_compute::IFunction
{
public:
FeatureLoggingLayer(void) : _tag(""), _target(nullptr)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Configure the layer
+ * @param[in] tag Text tag for this layer
+ * @param[in] target The feature tensor to be printed
+ * @return N/A
+ */
void configure(const std::string &tag, ::arm_compute::ITensor *target)
{
_tag = tag;
}
public:
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run(void) override
{
if (::internal::arm_compute::isGpuMode())
* limitations under the License.
*/
+/**
+ * @file GenericFullyConnectedLayer.h
+ * @brief This file contains GenericFullyConnectedLayer class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __GENERIC_FULLY_CONNECTED_LAYER_H__
#define __GENERIC_FULLY_CONNECTED_LAYER_H__
#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
#include "internal/layers/GenericReshapeLayer.h"
+/**
+ * @brief Class to run FullyConnected Layer with both CPU and GPU
+ */
class GenericFullyConnectedLayer : public ::arm_compute::IFunction
{
public:
}
public:
+ /**
+ * @brief Configure the layer
+ * @param[in] input The source tensor
+ * @param[in] weights The tensor that is filled with weight values
+ * @param[in] biases The tensor that is filled with bias values
+ * @param[in] output The destination tensor
+ * @param[in] needs_reshape Whether it needs to be reshaped or not
+ * @param[in] reshape The tensor shape to be reshaped. Only valid when needs_reshape is true.
+ * @return N/A
+ */
void configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *weights,
::arm_compute::ITensor *biases, ::arm_compute::ITensor *output, bool needs_reshape,
::arm_compute::TensorShape reshape);
public:
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run(void) override;
private:
* limitations under the License.
*/
+/**
+ * @file GenericReshapeLayer.h
+ * @brief This file contains GenericReshapeLayer class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __GENERIC_RESHAPE_LAYER_H__
#define __GENERIC_RESHAPE_LAYER_H__
#include <arm_compute/runtime/NEON/functions/NEPermute.h>
#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
+/**
+ * @brief Class to run Reshape Layer with both CPU and GPU
+ */
class GenericReshapeLayer : public ::arm_compute::IFunction
{
public:
}
public:
+ /**
+ * @brief Configure the layer
+ * @param[in] input The source tensor
+ * @param[in] output The destination tensor
+ * @return N/A
+ */
void configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *output);
public:
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run(void) override;
private:
* limitations under the License.
*/
+/**
+ * @file PadLayer.h
+ * @brief This file contains PadLayer class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __PAD_LAYER_H__
#define __PAD_LAYER_H__
// DO NOTHING
}
+ /**
+ * @brief Configure the layer
+ * @param[in] input The source tensor
+ * @param[in] output The destination tensor
+ * @param[in] border_width The padding size
+ * @return N/A
+ */
void configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *output,
unsigned int border_width);
-
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run(void) override;
private:
::arm_compute::CLFillBorder _fillborderkernel;
::arm_compute::NEFillBorder _nefillborderkernel;
+ /**
+ * @brief Copy the data to output
+ * @return N/A
+ */
void populateOutput();
};
* limitations under the License.
*/
+/**
+ * @file SimpleArithmeticAddition.h
+ * @brief This file contains SimpleArithmeticAddition class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __SIMPLE_ARITHMETIC_ADDITION_H__
#define __SIMPLE_ARITHMETIC_ADDITION_H__
#include "internal/arm_compute.h"
#include <arm_compute/core/ITensor.h>
+/**
+ * @brief Class to run SimpleArithmeticAddition Layer
+ */
class SimpleArithmeticAddition : public ::arm_compute::IFunction
{
public:
// DO NOTHING
}
+ /**
+ * @brief Configure the layer
+ * @param[in] lhs Lefthand-side operand
+ * @param[in] rhs Righthand-side operand
+ * @param[in] out The destination tensor(Result operand)
+ * @return N/A
+ */
void configure(::arm_compute::ITensor *lhs, ::arm_compute::ITensor *rhs,
::arm_compute::ITensor *out)
{
}
public:
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run(void) override
{
if (::internal::arm_compute::isGpuMode())
* limitations under the License.
*/
+/**
+ * @file SimpleCastLayer.h
+ * @brief This file contains SimpleCastLayer class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __SIMPLE_CAST_LAYER_H__
#define __SIMPLE_CAST_LAYER_H__
#include "internal/arm_compute.h"
#include "internal/arm_compute/Cast.h"
+/**
+ * @brief Class to run SimpleCast Layer
+ */
class SimpleCastLayer : public ::arm_compute::IFunction
{
public:
// DO NOTHING
}
+ /**
+ * @brief Configure the layer
+ * @param[in] in The source tensor
+ * @param[in] out The destination tensor
+ * @return N/A
+ */
void configure(::arm_compute::ITensor *in, ::arm_compute::ITensor *out);
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run(void) override;
private:
+ /**
+ * @brief Cast and copy data from one tensor to another
+ *
+ * @param[in] in The source tensor
+ * @param[out] out The destination tensor
+ * @param[in] id Coordinates to copy
+ * @return N/A
+ */
void castData(::arm_compute::ITensor *in, ::arm_compute::ITensor *out,
const arm_compute::Coordinates &id);
#include <arm_compute/core/ITensor.h>
#include <arm_compute/runtime/IFunction.h>
+/**
+ * @file SimpleEmbeddingLookup.h
+ * @brief This file contains SimpleEmbeddingLookup class
+ * @ingroup COM_AI_RUNTIME
+ */
+
+/**
+ * @brief Class to run SimpleEmbeddingLookup Layer
+ */
class SimpleEmbeddingLookup : public ::arm_compute::IFunction
{
public:
}
public:
+ /**
+ * @brief Configure the layer
+ * @param[in] lookups 1D tensor which contains lookup values
+ * @param[in] values The source tensor
+ * @param[in] output The destination tensor
+ * @return N/A
+ */
void configure(::arm_compute::ITensor *lookups, ::arm_compute::ITensor *values,
::arm_compute::ITensor *output);
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run() override;
private:
* limitations under the License.
*/
+/**
+ * @file SimpleSpaceToDepth.h
+ * @brief This file contains SimpleSpaceToDepth class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __SIMPLE_SPACE_TO_DEPTH_H__
#define __SIMPLE_SPACE_TO_DEPTH_H__
#include <arm_compute/core/ITensor.h>
#include <arm_compute/runtime/IFunction.h>
+/**
+ * @brief Class to run SimpleSpaceToDepth Layer
+ */
class SimpleSpaceToDepth : public ::arm_compute::IFunction
{
public:
// DO NOTHING
}
- /** Initialise input and output
- *
- * @param[in] input First tensor input.
- * @param[out] output Output tensor.
- * @param[in] block_size Block size.
+ /**
+ * @brief Configure the layer
+ * @param[in] input First tensor input.
+ * @param[in] output Output tensor.
+ * @param[in] block_size Block size.
+ * @return N/A
*/
void configure(::arm_compute::ITensor *input, ::arm_compute::ITensor *output, int32_t block_size,
const ::arm_compute::Coordinates &axises = getARMComputeAxises(4));
+ /**
+ * @brief Run the operation. Must be called after configure().
+ * @return N/A
+ */
void run() override;
private:
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::nnapi::feature::Reader
+ */
#ifndef __INTERNAL_NNAPI_FEATURE_READER_H__
#define __INTERNAL_NNAPI_FEATURE_READER_H__
namespace feature
{
+/**
+ * @brief Class to support reading element in feature(3D, 4D)
+ */
template <typename T> class Reader final : public nnfw::util::feature::Reader<T>
{
public:
+ /**
+ * @brief Construct a new Reader object
+ * @param[in] shape Shape of feature
+ * @param[in] ptr Pointer to feature data
+ * @param[in] len Size of tensor (byte)
+ */
// NOTE The parameter len denotes the number of bytes.
Reader(const ::nnfw::util::feature::Shape &shape, const T *ptr, size_t len)
: _shape{shape}, _ptr{ptr}
}
public:
+ /**
+ * @brief Get shape of feature
+ * @return Shape of feature
+ */
const nnfw::util::feature::Shape &shape(void) const { return _shape; }
public:
+ /**
+ * @brief Get value of element using channel, row, and column index for 3D feature
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t ch, uint32_t row, uint32_t col) const override
{
uint32_t index = index_of(_shape, ch, row, col);
return arr[index];
}
+ /**
+ * @brief Get value of element using batch, channel, row, and column index for 4D feature
+ * @param[in] batch Batch index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
{
uint32_t index = index_of(_shape, batch, ch, row, col);
* limitations under the License.
*/
+/**
+ * @file Utils.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines utility functions used in internal::nnapi::feature namespace
+ */
#ifndef __INTERNAL_NNAPI_FEATURE_UTILS_H__
#define __INTERNAL_NNAPI_FEATURE_UTILS_H__
namespace feature
{
+/**
+ * @brief Get position of element using channel, row, and column for 3D feature
+ * @param[in] shape Shape of feature
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Position of element
+ */
inline uint32_t index_of(const ::nnfw::util::feature::Shape &shape, uint32_t ch, uint32_t row,
uint32_t col)
{
return res;
}
+/**
+ * @brief Get position of element using batch, channel, row, and column for 4D feature
+ * @param[in] shape Shape of feature
+ * @param[in] batch Batch index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Position of element
+ */
inline uint32_t index_of(const ::nnfw::util::feature::Shape &shape, uint32_t batch, uint32_t ch,
uint32_t row, uint32_t col)
{
* limitations under the License.
*/
+/**
+ * @file View.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::nnapi::feature::View class
+ */
#ifndef __INTERNAL_NNAPI_FEATURE_VIEW_H__
#define __INTERNAL_NNAPI_FEATURE_VIEW_H__
namespace feature
{
+/**
+ * @brief Class to access feature's element information using index
+ */
template <typename T> class View final : public nnfw::util::feature::Reader<T>
{
public:
+ /**
+ * @brief Construct a new View object
+ * @param[in] shape Shape of feature
+ * @param[in] ptr Pointer to feature data
+ * @param[in] len Size of feature (byte)
+ */
// NOTE The parameter len denotes the number of bytes.
View(const ::nnfw::util::feature::Shape &shape, T *ptr, size_t len) : _shape{shape}, _ptr{ptr}
{
}
public:
+ /**
+ * @brief Get shape of feature
+ * @return Shape of feature
+ */
const nnfw::util::feature::Shape &shape(void) const { return _shape; }
public:
+ /**
+ * @brief Get value of element in 3D feature using channel, row, and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t ch, uint32_t row, uint32_t col) const override
{
uint32_t index = index_of(_shape, ch, row, col);
return _ptr[index];
}
+
+ /**
+ * @brief Get value of element in 4D feature using batch, channel, row and column index
+ * @param[in] batch Batch index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) const override
{
uint32_t index = index_of(_shape, batch, ch, row, col);
return _ptr[index];
}
+ /**
+ * @brief Get reference of element in 3D feature using channel, row, and column index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
T &at(uint32_t ch, uint32_t row, uint32_t col)
{
uint32_t index = index_of(_shape, ch, row, col);
return _ptr[index];
}
+
+ /**
+ * @brief Get reference of element in 4D feature using batch, channel, row and column index
+ * @param[in] batch Batch index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Reference of element
+ */
T &at(uint32_t batch, uint32_t ch, uint32_t row, uint32_t col)
{
uint32_t index = index_of(_shape, batch, ch, row, col);
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::nnapi::kernel::Reader class
+ */
#ifndef __INTERNAL_NNAPI_KERNEL_READER_H__
#define __INTERNAL_NNAPI_KERNEL_READER_H__
namespace kernel
{
+/**
+ * @brief Class to support reading element in kernel
+ */
template <typename T> class Reader final : public nnfw::util::kernel::Reader<T>
{
public:
+ /**
+ * @brief Construct a new Reader object
+ * @param[in] shape Shape of kernel
+ * @param[in] ptr Pointer to kernel data
+ * @param[in] len Size of kernel (byte)
+ */
// NOTE The parameter len denotes the number of bytes.
Reader(const ::nnfw::util::kernel::Shape &shape, const T *ptr, size_t len)
: _shape{shape}, _ptr{ptr}
}
public:
+ /**
+ * @brief Get shape of kernel
+ * @return Shape of kernel
+ */
const nnfw::util::kernel::Shape &shape(void) const { return _shape; }
public:
+ /**
+ * @brief Get value of element for kernel
+ * @param[in] nth Kernel index
+ * @param[in] ch Channel index
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t nth, uint32_t ch, uint32_t row, uint32_t col) const override
{
// NNAPI uses NHWC ordering
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::nnapi::matrix::Reader class
+ */
#ifndef __INTERNAL_NNAPI_MATRIX_READER_H__
#define __INTERNAL_NNAPI_MATRIX_READER_H__
namespace matrix
{
+/**
+ * @brief Class to support reading element in matrix
+ */
template <typename T> class Reader final : public nnfw::util::matrix::Reader<T>
{
public:
+ /**
+ * @brief Construct a new Reader object
+ * @param[in] shape Shape of matrix
+ * @param[in] ptr Pointer to matrix data
+ * @param[in] len Size of matrix (byte)
+ */
// NOTE The parameter len denotes the number of bytes.
Reader(const ::nnfw::util::matrix::Shape &shape, const T *ptr, size_t len)
: _shape{shape}, _ptr{ptr}
}
public:
+ /**
+ * @brief Get shape of matrix
+ * @return Shape of matrix
+ */
const nnfw::util::matrix::Shape &shape(void) const { return _shape; }
public:
+ /**
+ * @brief Get value of element for matrix
+ * @param[in] row Row index
+ * @param[in] col Column index
+ * @return Value of element
+ */
T at(uint32_t row, uint32_t col) const override
{
// NNAPI uses NHWC ordering
* limitations under the License.
*/
+/**
+ * @file ConstView.h
+ * @brief This file contains ConstView class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_NNAPI_TENSOR_CONST_VIEW_H__
#define __INTERNAL_NNAPI_TENSOR_CONST_VIEW_H__
namespace tensor
{
+/**
+ * @brief Wrapper class to read tensor values
+ * @tparam T The tensor element type
+ */
template <typename T> class ConstView
{
public:
+ /**
+ * @brief Construct a ConstView class
+ * @param[in] shape Tensor shape
+ * @param[in] ptr The base pointer of actual data
+ * @param[in] len The number of bytes
+ */
ConstView(const ::nnfw::util::tensor::Shape &shape, const uint8_t *ptr, size_t len)
: _shape{shape}, _ptr{ptr}, _len{len}
{
private:
// TODO Make this as a helper function, and share it for both View<T> and ConstView<T>
+ /**
+ * @brief Calculate offset for the given tensor index
+ * @param[in] index Tensor index
+ * @return The calculated offset
+ */
uint32_t offset_of(const nnfw::util::tensor::Index &index) const
{
if (_shape.rank() == 0)
}
public:
+ /**
+ * @brief Get the value on the given index
+ * @param[in] index Flattened tensor index
+ * @return The value on the given index
+ */
T at(const nnfw::util::tensor::Index &index) const
{
const auto offset = offset_of(index);
* limitations under the License.
*/
+/**
+ * @file Reader.h
+ * @brief This file contains Reader class
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_NNAPI_TENSOR_READER_H__
#define __INTERNAL_NNAPI_TENSOR_READER_H__
namespace tensor
{
+/**
+ * @brief Wrapper class to read tensor values
+ * @tparam T The tensor element type
+ */
template <typename T> class Reader final : public nnfw::util::tensor::Reader<T>
{
public:
- // NOTE The parameter len denotes the number of bytes.
+ /**
+ * @brief Construct a Reader class
+ * @param[in] shape Tensor shape
+ * @param[in] ptr The base pointer of actual data
+ * @param[in] len The number of bytes
+ */
Reader(const ::nnfw::util::tensor::Shape &shape, const T *ptr, size_t len)
: _shape{shape}, _ptr{ptr}
{
}
public:
+ /**
+ * @brief Get shape object
+ * @return The shape as const reference
+ */
const nnfw::util::tensor::Shape &shape(void) const { return _shape; }
public:
+ /**
+ * @brief Get the value on the given index
+ * @param[in] index_nnapi Flattened tensor index
+ * @return The value on the given index
+ */
T at(const nnfw::util::tensor::Index &index_nnapi) const override
{
uint32_t offset = 0;
}
private:
- /*
- Assuming that shape is [d4, .. , d1] and data is stored at a pointer ptr,
- we need to calculate the offset of index [i4, .. i1] as follows:
- offset = i4 * (d3 * d2 * d1) +
- i3 * (d2 * d1) +
- i2 * (d1) +
- i1
- So (d4 * d3 * d2 * d1) or (d3 * d2 * d1) or (d2 * d1) happens whenever offset is calculate.
- To minimize this repetitive calculation,
- _stridess[n] contains _spape[n-1]*_spape[n-2]*_spape[0]
- */
+ /**
+ * @brief Initializes @c _stridess
+ * @return N/A
+ * @note Assuming that shape is [d4, .. , d1] and data is stored at a pointer ptr,
+ we need to calculate the offset of index [i4, .. i1] as follows:
+ offset = i4 * (d3 * d2 * d1) +
+ i3 * (d2 * d1) +
+ i2 * (d1) +
+ i1
+ So (d4 * d3 * d2 * d1) or (d3 * d2 * d1) or (d2 * d1) happens whenever offset is
+ calculate. To minimize this repetitive calculation,
+ _stridess[n] contains _shape[n-1]*_shape[n-2]*...*_shape[0]
+ */
void initialize(void)
{
for (int r = 0; r < _shape.rank(); r++)
* limitations under the License.
*/
+/**
+ * @file View.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::nnapi::tensor::View class
+ */
#ifndef __INTERNAL_NNAPI_TENSOR_VIEW_H__
#define __INTERNAL_NNAPI_TENSOR_VIEW_H__
namespace tensor
{
+/**
+ * @brief Class to access tensor's element information using index
+ */
template <typename T> class View
{
public:
+ /**
+ * @brief Construct a new View object
+ * @param[in] shape Shape of tensor
+ * @param[in] ptr Pointer to tensor data
+ * @param[in] len Size of tensor (byte)
+ */
// NOTE The parameter len denotes the number of bytes.
View(const ::nnfw::util::tensor::Shape &shape, T *ptr, size_t len) : _shape{shape}, _ptr{ptr}
{
}
public:
+ /**
+ * @brief Get shape of tensor
+ * @return Shape of tensor
+ */
const nnfw::util::tensor::Shape &shape(void) const { return _shape; }
private:
+ /**
+ * @brief Get position of element using index in tensor
+ * @param[in] index Index of element
+ * @return Position of element
+ */
uint32_t offset_of(const nnfw::util::tensor::Index &index) const
{
if (_shape.rank() == 0)
}
public:
+ /**
+ * @brief Get value of element at index
+ * @param[in] index Index of element
+ * @return Value of element at index
+ */
T at(const nnfw::util::tensor::Index &index) const
{
const auto offset = offset_of(index);
return _ptr[offset];
}
+ /**
+ * @brief Get reference of element at index
+ * @param[in] index Index of element
+ * @return Reference of element at index
+ */
T &at(const nnfw::util::tensor::Index &index)
{
const auto offset = offset_of(index);
* limitations under the License.
*/
+/**
+ * @file Add.h
+ * @brief This file contains accept function and params for Add operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_ADD_H__
#define __INTERNAL_OP_ADD_H__
namespace Add
{
+/**
+ * @brief Struct of Add operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output feature map index */
- int32_t lhs_index;
- int32_t rhs_index;
- int32_t activation_index;
+ int32_t lhs_index; /**< Left hand side index */
+ int32_t rhs_index; /**< Right hand side index */
+ int32_t activation_index; /**< Activation index */
+ /**
+ * @brief Construct a new Param object for Add as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for Add with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for Add
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for Add with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for Add
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for Add
+ * @return Parameters of Add
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for Add
+ * @param [in] v Node visitor for invoking visit function of Add
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file AvgPool2D.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::AvgPool2D Param structs
+ * and internal::tflite::op::AvgPool2D Node classes
+ */
#ifndef __INTERNAL_OP_AVG_POOL_2D_H__
#define __INTERNAL_OP_AVG_POOL_2D_H__
namespace Explicit
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Index of output feature map */
- int32_t ifm_index;
+ int32_t ifm_index; /**< Index of input feature map */
- int32_t kw_index;
- int32_t kh_index;
+ int32_t kw_index; /**< Index of kernel width */
+ int32_t kh_index; /**< Index of kernel height */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t hstride_index; /**< Index of horizontal stride */
+ int32_t vstride_index; /**< Index of vertical stride */
- int32_t padding_left_index;
- int32_t padding_right_index;
- int32_t padding_top_index;
- int32_t padding_bottom_index;
-
- int32_t activation_index;
+ int32_t padding_left_index; /**< Index of padding left */
+ int32_t padding_right_index; /**< Index of padding right */
+ int32_t padding_top_index; /**< Index of padding top */
+ int32_t padding_bottom_index; /**< Index of padding bottom */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
namespace Implicit
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
+ int32_t ofm_index; /**< Index of output feature map */
- int32_t kw_index;
- int32_t kh_index;
+ int32_t ifm_index; /**< Index of input feature map */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t kw_index; /**< Index of kernel width */
+ int32_t kh_index; /**< Index of kernel height */
- int32_t padding_index;
- int32_t activation_index;
+ int32_t hstride_index; /**< Index of horizontal stride */
+ int32_t vstride_index; /**< Index of vertical stride */
+ int32_t padding_index; /**< Index of padding */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Cast.h
+ * @brief This file contains accept function and params for Cast operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_CAST_H__
#define __INTERNAL_OP_CAST_H__
namespace Cast
{
+/**
+ * @brief Struct of Cast operation's param
+ */
struct Param
{
- int32_t output_index;
+ int32_t output_index; /**< Output index */
- int32_t input_index;
+ int32_t input_index; /**< Input index */
+ /**
+ * @brief Construct a new Param object for Cast as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for Cast with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for Cast
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for Cast with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for Cast
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for Cast
+ * @return Parameters of Cast
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for Cast
+ * @param [in] v Node visitor for invoking visit function of Cast
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Concat.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Concat node
+ */
+
#ifndef __INTERNAL_OP_CONCAT_H__
#define __INTERNAL_OP_CONCAT_H__
namespace Concat
{
+/**
+ * @brief Struct to manipulate parameter for Concat operation
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; //!< index for output
- std::vector<int32_t> ifm_indexes;
- int32_t axis_index;
+ std::vector<int32_t> ifm_indexes; //!< index for input
+ int32_t axis_index; //!< index for axis
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for output data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define Concat Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Concat Node object
+ * @param param Parameter for Concat Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for Concat node
};
} // namespace Concat
* limitations under the License.
*/
+/**
+ * @file Conv2D.h
+ * @brief This file contains accept function and params for Conv2D operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_CONV_2D_H__
#define __INTERNAL_OP_CONV_2D_H__
namespace Explicit
{
+/**
+ * @brief Struct of Conv2D(explicit) operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output feature map index */
- int32_t ifm_index;
- int32_t ker_index;
- int32_t bias_index;
+ int32_t ifm_index; /**< Input feature map index */
+ int32_t ker_index; /**< Kernel index */
+ int32_t bias_index; /**< Bias index */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t hstride_index; /**< Horizontal stride index */
+ int32_t vstride_index; /**< Vertical stride index */
- int32_t padding_left_index;
- int32_t padding_right_index;
- int32_t padding_top_index;
- int32_t padding_bottom_index;
+ int32_t padding_left_index; /**< Left padding index */
+ int32_t padding_right_index; /**< Right padding index */
+ int32_t padding_top_index; /**< Top padding index */
+ int32_t padding_bottom_index; /**< Bottom padding index */
- int32_t activation_index;
+ int32_t activation_index; /**< Activation index */
+ /**
+ * @brief Construct a new Param object for Conv2D(explicit) as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for Conv2D(explicit) with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for Conv2D(explicit)
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for conv2D(explicit) with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for conv2D(explicit)
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for conv2D(explicit)
+ * @return Parameters of conv2D(explicit)
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for conv2D(explicit)
+ * @param [in] v Node visitor for invoking visit function of conv2D(explicit)
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
namespace Implicit
{
+/**
+ * @brief Struct of Conv2D(implicit) operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output format index */
- int32_t ifm_index;
- int32_t ker_index;
- int32_t bias_index;
+ int32_t ifm_index; /**< Input format index */
+ int32_t ker_index; /**< Kernel index */
+ int32_t bias_index; /**< Bias index */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t hstride_index; /**< Horizontal stride index */
+ int32_t vstride_index; /**< Vertical stride index */
- int32_t padding_index;
- int32_t activation_index;
+ int32_t padding_index; /**< Padding index */
+ int32_t activation_index; /**< Activation index */
+ /**
+ * @brief Construct a new Param object for Conv2D(implicit) as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for Conv2D(implicit) with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for Conv2D(implicit)
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for conv2D(implicit) with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for conv2D(implicit)
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for conv2D(implicit)
+ * @return Parameters of conv2D(implicit)
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for conv2D(implicit)
+ * @param [in] v Node visitor for invoking visit function of conv2D(implicit)
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file DepthwiseConv2D.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::DepthwiseConv2D Param structs
+ * and internal::tflite::op::DepthwiseConv2D Node classes
+ */
#ifndef __INTERNAL_OP_DEPTHWISE_CONV_2D_H__
#define __INTERNAL_OP_DEPTHWISE_CONV_2D_H__
namespace Explicit
{
+/**
+ * @brief Struct to have indexes for explicit padding DepthwiseConv2D operation parameter
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Index of output feature map */
- int32_t ifm_index;
- int32_t ker_index;
- int32_t bias_index;
+ int32_t ifm_index; /**< Index of input feature map */
+ int32_t ker_index; /**< Index of kernel */
+ int32_t bias_index; /**< Index of bias */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t hstride_index; /**< Index of horizontal stride */
+ int32_t vstride_index; /**< Index of vertical stride */
- int32_t padding_left_index;
- int32_t padding_right_index;
- int32_t padding_top_index;
- int32_t padding_bottom_index;
-
- int32_t multipler_index;
- int32_t activation_index;
+ int32_t padding_left_index; /**< Index of padding left */
+ int32_t padding_right_index; /**< Index of padding right */
+ int32_t padding_top_index; /**< Index of padding top */
+ int32_t padding_bottom_index; /**< Index of padding bottom */
+ int32_t multipler_index; /**< Index of multiplier */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an explicit padding DepthwiseConv2D operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
} // namespace Explicit
namespace Implicit
{
+/**
+ * @brief Struct to have indexes for implicit padding DepthwiseConv2D operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
- int32_t ker_index;
- int32_t bias_index;
+ int32_t ofm_index; /**< Index of output feature map */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t ifm_index; /**< Index of input feature map */
+ int32_t ker_index; /**< Index of kernel */
+ int32_t bias_index; /**< Index of bias */
- int32_t padding_index;
- int32_t multipler_index;
- int32_t activation_index;
+ int32_t hstride_index; /**< Index of horizontal stride */
+ int32_t vstride_index; /**< Index of vertical stride */
+ int32_t padding_index; /**< Index of padding */
+ int32_t multipler_index; /**< Index of multiplier */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an implicit padding DepthwiseConv2D operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Dequantize.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::Dequantize::Param struct
+ * and internal::tflite::op::Dequantize::Node class
+ */
#ifndef __INTERNAL_OP_DEQUANTIZE_H__
#define __INTERNAL_OP_DEQUANTIZE_H__
namespace Dequantize
{
+/**
+ * @brief Struct to have indexes for Dequantize operation parameter
+ */
struct Param
{
- int32_t output_index;
-
- int32_t input_index;
+ int32_t output_index; /**< Index of output feature map */
+ int32_t input_index; /**< Index of input feature map */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent a Dequantize operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Div.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::Div::Param struct
+ * and internal::tflite::op::Div::Node class
+ */
#ifndef __INTERNAL_OP_DIV_H__
#define __INTERNAL_OP_DIV_H__
namespace Div
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t lhs_index;
- int32_t rhs_index;
- int32_t activation_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t lhs_index; /**< Index of lhs */
+ int32_t rhs_index; /**< Index of rhs */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file EmbeddingLookup.h
+ * @brief This file contains accept function and params for EmbeddingLookup operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_EMBEDDING_LOOKUP_H__
#define __INTERNAL_OP_EMBEDDING_LOOKUP_H__
namespace EmbeddingLookup
{
+/**
+ * @brief Struct of EmbeddingLookup operation's param
+ */
struct Param
{
- int32_t output_index;
+ int32_t output_index; /**< Output index */
- int32_t lookups_index;
- int32_t values_index;
+ int32_t lookups_index; /**< Lookups index */
+ int32_t values_index; /**< Values index */
+ /**
+ * @brief Construct a new Param object for EmbeddingLookup as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for EmbeddingLookup with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for EmbeddingLookup
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for EmbeddingLookup with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for EmbeddingLookup
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for EmbeddingLookup
+ * @return Parameters of EmbeddingLookup
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for EmbeddingLookup
+ * @param [in] v Node visitor for invoking visit function of EmbeddingLookup
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Floor.h
+ * @brief This file contains accept function and params for Floor operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_FLOOR_H__
#define __INTERNAL_OP_FLOOR_H__
namespace Floor
{
+/**
+ * @brief Struct of Floor operation's param
+ */
struct Param
{
- int32_t output_index;
+ int32_t output_index; /**< Output index */
- int32_t input_index;
+ int32_t input_index; /**< Input index */
+ /**
+ * @brief Construct a new Param object for Floor as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for Floor with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for Floor
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for Floor with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for Floor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for Floor
+ * @return Parameters of Floor
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for Floor
+ * @param [in] v Node visitor for invoking visit function of Floor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file FullyConnected.h
+ * @brief This file contains accept function and params for FullyConnected operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_FULLY_CONNTECTED_H__
#define __INTERNAL_OP_FULLY_CONNTECTED_H__
namespace FullyConnected
{
+/**
+ * @brief Struct of FullyConnected operation's param
+ */
struct Param
{
- int32_t output_index;
+ int32_t output_index; /**< Output index */
- int32_t input_index;
- int32_t weight_index;
- int32_t bias_index;
- int32_t activation_index;
+ int32_t input_index; /**< Input index */
+ int32_t weight_index; /**< Weight index */
+ int32_t bias_index; /**< Bias index */
+ int32_t activation_index; /**< Activation index */
+ /**
+ * @brief Construct a new Param object for FullyConnected as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for FullyConnected with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for FullyConnected
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for FullyConnected with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for FullyConnected
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for FullyConnected
+ * @return Parameters of FullyConnected
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for FullyConnected
+ * @param [in] v Node visitor for invoking visit function of FullyConnected
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Gather.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Gather operation
+ */
+
#ifndef __INTERNAL_OP_GATHER_H__
#define __INTERNAL_OP_GATHER_H__
namespace Gather
{
+/**
+ * @brief Struct to manipulate parameter for Gather operation
+ */
struct Param
{
- int32_t ofm_index; // output
+ int32_t ofm_index; //!< index for output feature map
- int32_t lhs_index; // input
- int32_t rhs_index; // indexes
- int32_t axis_index; // axis
+ int32_t lhs_index; //!< index for lhs tensor
+ int32_t rhs_index; //!< index for rhs tensor
+ int32_t axis_index; //!< index for axis
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for output data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define Gather Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Gather Node object
+ * @param param Parameter for Gather Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for Gather node
};
} // namespace Gather
* limitations under the License.
*/
+/**
+ * @file HashtableLookup.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::HashtableLookup::Param struct
+ * and internal::tflite::op::HashtableLookup::Node class
+ */
#ifndef __INTERNAL_OP_HASHTABLE_LOOKUP_H__
#define __INTERNAL_OP_HASHTABLE_LOOKUP_H__
namespace HashtableLookup
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t output_index;
- int32_t hits_index;
-
- int32_t lookups_index;
- int32_t values_index;
- int32_t keys_index;
+ int32_t output_index; /**< Index of output feature map */
+ int32_t hits_index; /**< Index of hits */
+ int32_t lookups_index; /**< Index of lookups */
+ int32_t values_index; /**< Index of values */
+ int32_t keys_index; /**< Index of keys */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file L2Normalization.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::L2Normalization::Param struct
+ * and internal::tflite::op::L2Normalization::Node class
+ */
#ifndef __INTERNAL_OP_L2_NORMALIZATION_H__
#define __INTERNAL_OP_L2_NORMALIZATION_H__
namespace L2Normalization
{
+/**
+ * @brief Struct to have indexes for L2Normalization operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t ifm_index; /**< Index of input feature map */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an L2Normalization operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file L2Pool2D.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::L2Pool2D Param structs
+ * and internal::tflite::op::L2Pool2D Node classes
+ */
#ifndef __INTERNAL_OP_L2_POOL_2D_H__
#define __INTERNAL_OP_L2_POOL_2D_H__
namespace Explicit
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Index of output feature map */
- int32_t ifm_index;
+ int32_t ifm_index; /**< Index of input feature map */
- int32_t kw_index;
- int32_t kh_index;
+ int32_t kw_index; /**< Index of kernel width */
+ int32_t kh_index; /**< Index of kernel height */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t hstride_index; /**< Index of horizontal stride */
+ int32_t vstride_index; /**< Index of vertical stride */
- int32_t padding_left_index;
- int32_t padding_right_index;
- int32_t padding_top_index;
- int32_t padding_bottom_index;
-
- int32_t activation_index;
+ int32_t padding_left_index; /**< Index of padding left */
+ int32_t padding_right_index; /**< Index of padding right */
+ int32_t padding_top_index; /**< Index of padding top */
+ int32_t padding_bottom_index; /**< Index of padding bottom */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
namespace Implicit
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
+ int32_t ofm_index; /**< Index of output feature map */
- int32_t kw_index;
- int32_t kh_index;
+ int32_t ifm_index; /**< Index of input feature map */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t kw_index; /**< Index of kernel width */
+ int32_t kh_index; /**< Index of kernel height */
- int32_t padding_index;
- int32_t activation_index;
+ int32_t hstride_index; /**< Index of horizontal stride */
+ int32_t vstride_index; /**< Index of vertical stride */
+ int32_t padding_index; /**< Index of padding */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Logistic.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::Logistic::Param struct
+ * and internal::tflite::op::Logistic::Node class
+ */
#ifndef __INTERNAL_OP_LOGISTIC_H__
#define __INTERNAL_OP_LOGISTIC_H__
namespace Logistic
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t ifm_index; /**< Index of input feature map */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Lstm.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::LSTM::Param struct
+ * and internal::tflite::op::LSTM::Node class
+ */
#ifndef __INTERNAL_OP_LSTM_H__
#define __INTERNAL_OP_LSTM_H__
namespace LSTM
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t scratch_buffer_index;
- int32_t output_state_out_index;
- int32_t cell_state_out_index;
- int32_t output_index;
+ int32_t scratch_buffer_index; /**< Index of scratch buffer */
+ int32_t output_state_out_index; /**< Index of output state out */
+ int32_t cell_state_out_index; /**< Index of cell state out */
+ int32_t output_index; /**< Index of output */
- int32_t input_index;
- int32_t input_to_input_weights_index;
- int32_t input_to_forget_weights_index;
- int32_t input_to_cell_weights_index;
- int32_t input_to_output_weights_index;
- int32_t recurrent_to_input_weights_index;
- int32_t recurrent_to_forget_weights_index;
- int32_t recurrent_to_cell_weights_index;
- int32_t recurrent_to_output_weights_index;
- int32_t cell_to_input_weights_index;
- int32_t cell_to_forget_weights_index;
- int32_t cell_to_output_weights_index;
- int32_t input_gate_bias_index;
- int32_t forget_gate_bias_index;
- int32_t cell_bias_index;
- int32_t output_gate_bias_index;
- int32_t projection_weights_index;
- int32_t projection_bias_index;
- int32_t output_state_in_index;
- int32_t cell_state_in_index;
- int32_t activation_index;
- int32_t cell_threshold_index;
- int32_t projection_threshold_index;
+ int32_t input_index; /**< Index of input */
+ int32_t input_to_input_weights_index; /**< Index of input to input weights */
+ int32_t input_to_forget_weights_index; /**< Index of input to forget weights */
+ int32_t input_to_cell_weights_index; /**< Index of input to cell weights */
+ int32_t input_to_output_weights_index; /**< Index of input to output weights */
+ int32_t recurrent_to_input_weights_index; /**< Index of recurrent to input weights */
+ int32_t recurrent_to_forget_weights_index; /**< Index of recurrent to forget weights */
+ int32_t recurrent_to_cell_weights_index; /**< Index of recurrent to cell weights */
+ int32_t recurrent_to_output_weights_index; /**< Index of recurrent to output weights */
+ int32_t cell_to_input_weights_index; /**< Index of cell to input weights */
+ int32_t cell_to_forget_weights_index; /**< Index of cell to forget weights */
+ int32_t cell_to_output_weights_index; /**< Index of cell to output weights */
+ int32_t input_gate_bias_index; /**< Index of input gate bias */
+ int32_t forget_gate_bias_index; /**< Index of forget gate bias */
+ int32_t cell_bias_index; /**< Index of cell bias */
+ int32_t output_gate_bias_index; /**< Index of output gate bias */
+ int32_t projection_weights_index; /**< Index of projection weights */
+ int32_t projection_bias_index; /**< Index of projection bias */
+ int32_t output_state_in_index; /**< Index of output state in */
+ int32_t cell_state_in_index; /**< Index of cell state in */
+ int32_t activation_index; /**< Index of activation */
+ int32_t cell_threshold_index; /**< Index of cell threshold */
+ int32_t projection_threshold_index; /**< Index of projection threshold */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file MaxPool2D.h
+ * @brief This file contains accept function and params for MaxPool2D operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_MAX_POOL_2D_H__
#define __INTERNAL_OP_MAX_POOL_2D_H__
namespace Explicit
{
+/**
+ * @brief Struct of MaxPool2D(Explicit) operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output format index */
- int32_t ifm_index;
+ int32_t ifm_index; /**< Input format index */
- int32_t kw_index;
- int32_t kh_index;
+ int32_t kw_index; /**< Kernel width index */
+ int32_t kh_index; /**< Kernel height index */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t hstride_index; /**< Horizontal stride index */
+ int32_t vstride_index; /**< Vertical stride index */
- int32_t padding_left_index;
- int32_t padding_right_index;
- int32_t padding_top_index;
- int32_t padding_bottom_index;
+ int32_t padding_left_index; /**< Left padding index */
+ int32_t padding_right_index; /**< Right padding index */
+ int32_t padding_top_index; /**< Top padding index */
+ int32_t padding_bottom_index; /**< Bottom padding index */
- int32_t activation_index;
+ int32_t activation_index; /**< Activation index */
+ /**
+ * @brief Construct a new Param object for MaxPool2D(Explicit) as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for MaxPool2D(Explicit) with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for MaxPool2D(Explicit)
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for MaxPool2D(Explicit) with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for MaxPool2D(Explicit)
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for MaxPool2D(Explicit)
+ * @return Parameters of MaxPool2D(Explicit)
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for MaxPool2D(Explicit)
+ * @param [in] v Node visitor for invoking visit function of MaxPool2D(Explicit)
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
namespace Implicit
{
+/**
+ * @brief Struct of MaxPool2D(Implicit) operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output format index */
- int32_t ifm_index;
+ int32_t ifm_index; /**< Input format index */
- int32_t kw_index;
- int32_t kh_index;
+ int32_t kw_index; /**< Kernel width index */
+ int32_t kh_index; /**< Kernel height index */
- int32_t hstride_index;
- int32_t vstride_index;
+ int32_t hstride_index; /**< Horizontal stride index */
+ int32_t vstride_index; /**< Vertical stride index */
- int32_t padding_index;
- int32_t activation_index;
+ int32_t padding_index; /**< Padding index */
+ int32_t activation_index; /**< Activation index */
+ /**
+ * @brief Construct a new Param object for MaxPool2D(Implicit) as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for MaxPool2D(Implicit) with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for MaxPool2D(Implicit)
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for MaxPool2D(Implicit) with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for MaxPool2D(Implicit)
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for MaxPool2D(Implicit)
+ * @return Parameters of MaxPool2D(Implicit)
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for MaxPool2D(Implicit)
+ * @param [in] v Node visitor for invoking visit function of MaxPool2D(Implicit)
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Mean.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::Mean::Param struct
+ * and internal::tflite::op::Mean::Node class
+ */
#ifndef __INTERNAL_OP_MEAN_H__
#define __INTERNAL_OP_MEAN_H__
namespace Mean
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index; // output
-
- int32_t ifm_index; // input
- int32_t axis_index; // axis
- int32_t keep_dims_index; // keep_dims
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t ifm_index; /**< Index of input feature map */
+ int32_t axis_index; /**< Index of axis */
+ int32_t keep_dims_index; /**< Index of keep dims */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Mul.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::Mul class
+ */
#ifndef __INTERNAL_OP_MUL_H__
#define __INTERNAL_OP_MUL_H__
namespace Mul
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t lhs_index;
- int32_t rhs_index;
- int32_t activation_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t lhs_index; /**< Index of lhs */
+ int32_t rhs_index; /**< Index of rhs */
+ int32_t activation_index; /**< Index of activation */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Node.h
+ * @brief This file contains struct of Node and NodeVisitor
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_NODE_H__
#define __INTERNAL_OP_NODE_H__
namespace op
{
+/**
+ * @brief Struct of operation NodeVisitor
+ */
struct NodeVisitor;
+/**
+ * @brief Struct of operation Node
+ */
struct Node
{
+ /**
+ * @brief Destroy the Node object for operation
+ */
virtual ~Node() = default;
+ /**
+ * @brief Function for accepting node for operation
+ * @param [in] v Node visitor for invoking visit function of operation
+ * @return N/A
+ */
virtual void accept(NodeVisitor &&) const = 0;
};
* limitations under the License.
*/
+/**
+ * @file NodeVisitor.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines NodeVisitor
+ */
+
#ifndef __INTERNAL_OP_NODE_VISITOR_H__
#define __INTERNAL_OP_NODE_VISITOR_H__
namespace op
{
+/**
+ * @brief Struct to define visitor for operation Nodes
+ */
struct NodeVisitor
{
+ /**
+ * @brief Destruct NodeVisitor object with default
+ */
virtual ~NodeVisitor() = default;
+ /**
+ * @brief Visit an Add node
+ * @param[in] node Add node to visit
+ * @return N/A
+ */
virtual void visit(const Add::Node &) = 0;
+ /**
+ * @brief Visit a Sub node
+ * @param[in] node Sub node to visit
+ * @return N/A
+ */
virtual void visit(const Sub::Node &) = 0;
+ /**
+ * @brief Visit a Mul node
+ * @param[in] node Mul node to visit
+ * @return N/A
+ */
virtual void visit(const Mul::Node &) = 0;
+ /**
+ * @brief Visit a Div node
+ * @param[in] node Div node to visit
+ * @return N/A
+ */
virtual void visit(const Div::Node &) = 0;
+ /**
+ * @brief Visit a Conv2D node with implicit padding
+ * @param[in] node Conv2D node to visit
+ * @return N/A
+ */
virtual void visit(const Conv2D::Implicit::Node &) = 0;
+ /**
+ * @brief Visit a Conv2D node with explicit padding
+ * @param[in] node Conv2D node to visit
+ * @return N/A
+ */
virtual void visit(const Conv2D::Explicit::Node &) = 0;
+ /**
+ * @brief Visit a DepthwiseConv2D node with implicit padding
+ * @param[in] node DepthwiseConv2D node to visit
+ * @return N/A
+ */
virtual void visit(const DepthwiseConv2D::Implicit::Node &) = 0;
+ /**
+ * @brief Visit a DepthwiseConv2D node with explicit padding
+ * @param[in] node DepthwiseConv2D node to visit
+ * @return N/A
+ */
virtual void visit(const DepthwiseConv2D::Explicit::Node &) = 0;
+ /**
+ * @brief Visit a Dequantize node
+ * @param[in] node Dequantize node to visit
+ * @return N/A
+ */
virtual void visit(const Dequantize::Node &) = 0;
+ /**
+ * @brief Visit a MaxPool2D node with implicit padding
+ * @param[in] node MaxPool2D node to visit
+ * @return N/A
+ */
virtual void visit(const MaxPool2D::Implicit::Node &) = 0;
+ /**
+ * @brief Visit a MaxPool2D node with explicit padding
+ * @param[in] node MaxPool2D node to visit
+ * @return N/A
+ */
virtual void visit(const MaxPool2D::Explicit::Node &) = 0;
+ /**
+ * @brief Visit an AvgPool2D node with implicit padding
+ * @param[in] node AvgPool2D node to visit
+ * @return N/A
+ */
virtual void visit(const AvgPool2D::Implicit::Node &) = 0;
+ /**
+ * @brief Visit an AvgPool2D node with explicit padding
+ * @param[in] node AvgPool2D node to visit
+ * @return N/A
+ */
virtual void visit(const AvgPool2D::Explicit::Node &) = 0;
+ /**
+ * @brief Visit a Concat node
+ * @param[in] node Concat node to visit
+ * @return N/A
+ */
virtual void visit(const Concat::Node &) = 0;
+ /**
+ * @brief Visit a Reshape node
+ * @param[in] node Reshape node to visit
+ * @return N/A
+ */
virtual void visit(const Reshape::Node &) = 0;
+ /**
+ * @brief Visit a ResizeBilinear node
+ * @param[in] node ResizeBilinear node to visit
+ * @return N/A
+ */
virtual void visit(const ResizeBilinear::Node &) = 0;
+ /**
+ * @brief Visit a StridedSlice node
+ * @param[in] node StridedSlice node to visit
+ * @return N/A
+ */
virtual void visit(const StridedSlice::Node &) = 0;
+ /**
+ * @brief Visit a FullyConnected node
+ * @param[in] node FullyConnected node to visit
+ * @return N/A
+ */
virtual void visit(const FullyConnected::Node &) = 0;
+ /**
+ * @brief Visit a Softmax node
+ * @param[in] node Softmax node to visit
+ * @return N/A
+ */
virtual void visit(const Softmax::Node &) = 0;
+ /**
+ * @brief Visit a ReduceMax node
+ * @param[in] node ReduceMax node to visit
+ * @return N/A
+ */
virtual void visit(const ReduceMax::Node &) = 0;
+ /**
+ * @brief Visit a Cast node
+ * @param[in] node Cast node to visit
+ * @return N/A
+ */
virtual void visit(const Cast::Node &) = 0;
+ /**
+ * @brief Visit a TopKV2 node
+ * @param[in] node TopKV2 node to visit
+ * @return N/A
+ */
virtual void visit(const TopKV2::Node &) = 0;
+ /**
+ * @brief Visit a Gather node
+ * @param[in] node Gather node to visit
+ * @return N/A
+ */
virtual void visit(const Gather::Node &) = 0;
+ /**
+ * @brief Visit a ReLU node
+ * @param[in] node Relu node to visit
+ * @return N/A
+ */
virtual void visit(const ReLU::Node &) = 0;
+ /**
+ * @brief Visit a ReLU1 node
+ * @param[in] node ReLU1 node to visit
+ * @return N/A
+ */
virtual void visit(const ReLU1::Node &) = 0;
+ /**
+ * @brief Visit a ReLU6 node
+ * @param[in] node ReLU6 node to visit
+ * @return N/A
+ */
virtual void visit(const ReLU6::Node &) = 0;
+ /**
+ * @brief Visit a Tanh node
+ * @param[in] node Tanh node to visit
+ * @return N/A
+ */
virtual void visit(const Tanh::Node &) = 0;
+ /**
+ * @brief Visit a Squeeze node
+ * @param[in] node Squeeze node to visit
+ * @return N/A
+ */
virtual void visit(const Squeeze::Node &) = 0;
+ /**
+ * @brief Visit a Logistic node
+ * @param[in] node Logistic node to visit
+ * @return N/A
+ */
virtual void visit(const Logistic::Node &) = 0;
+ /**
+ * @brief Visit a Mean node
+ * @param[in] node Mean node to visit
+ * @return N/A
+ */
virtual void visit(const Mean::Node &) = 0;
+ /**
+ * @brief Visit an RNN node
+ * @param[in] node RNN node to visit
+ * @return N/A
+ */
virtual void visit(const RNN::Node &) = 0;
+ /**
+ * @brief Visit a Transpose node
+ * @param[in] node Transpose node to visit
+ * @return N/A
+ */
virtual void visit(const Transpose::Node &) = 0;
+ /**
+ * @brief Visit an LSTM node
+ * @param[in] node LSTM node to visit
+ * @return N/A
+ */
virtual void visit(const LSTM::Node &) = 0;
+ /**
+ * @brief Visit a Floor node
+ * @param[in] node Floor node to visit
+ * @return N/A
+ */
virtual void visit(const Floor::Node &) = 0;
+ /**
+ * @brief Visit a Split node
+ * @param[in] node Split node to visit
+ * @return N/A
+ */
virtual void visit(const Split::Node &) = 0;
+ /**
+ * @brief Visit an RSQRT node
+ * @param[in] node RSQRT node to visit
+ * @return N/A
+ */
virtual void visit(const RSQRT::Node &) = 0;
+ /**
+ * @brief Visit a Pad node
+ * @param[in] node Pad node to visit
+ * @return N/A
+ */
virtual void visit(const Pad::Node &) = 0;
+ /**
+ * @brief Visit a SpaceToDepth node
+ * @param[in] node SpaceToDepth node to visit
+ * @return N/A
+ */
virtual void visit(const SpaceToDepth::Node &) = 0;
virtual void visit(const SpaceToBatchND::Node &) = 0;
+ /**
+ * @brief Visit an L2Pool2D node with implicit padding
+ * @param[in] node L2Pool2D node to visit
+ * @return N/A
+ */
virtual void visit(const L2Pool2D::Implicit::Node &) = 0;
+ /**
+ * @brief Visit an L2Pool2D node with explicit padding
+ * @param[in] node L2Pool2D node to visit
+ * @return N/A
+ */
virtual void visit(const L2Pool2D::Explicit::Node &) = 0;
+ /**
+ * @brief Visit an EmbeddingLookup node
+ * @param[in] node EmbeddingLookup node to visit
+ * @return N/A
+ */
virtual void visit(const EmbeddingLookup::Node &) = 0;
+ /**
+ * @brief Visit a HashtableLookup node
+ * @param[in] node HashtableLookup node to visit
+ * @return N/A
+ */
virtual void visit(const HashtableLookup::Node &) = 0;
+ /**
+ * @brief Visit an L2Normalization node
+ * @param[in] node L2Normalization node to visit
+ * @return N/A
+ */
virtual void visit(const L2Normalization::Node &) = 0;
+ /**
+ * @brief Visit a SquaredDifference node
+ * @param[in] node SquaredDifference node to visit
+ * @return N/A
+ */
virtual void visit(const SquaredDifference::Node &) = 0;
virtual void visit(const LocalResponseNormalization::Node &) = 0;
virtual void visit(const DepthToSpace::Node &) = 0;
* limitations under the License.
*/
+/**
+ * @file Pad.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Pad node
+ */
+
#ifndef __INTERNAL_OP_PAD_H__
#define __INTERNAL_OP_PAD_H__
namespace Pad
{
+/**
+ * @brief Struct to manipulate parameter for Pad operation
+ */
struct Param
{
- int32_t ifm_index;
- int32_t paddings_index;
- int32_t ofm_index;
+ int32_t ifm_index; //!< index for input
+ int32_t paddings_index; //!< index for padding
+ int32_t ofm_index; //!< index for output
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for input data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define Pad Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Pad Node object
+ * @param[in] param Parameter for Pad Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for Pad node
};
} // namespace Pad
* limitations under the License.
*/
+/**
+ * @file RSQRT.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::RSQRT::Param struct
+ * and internal::tflite::op::RSQRT::Node class
+ */
#ifndef __INTERNAL_OP_RSQRT_H__
#define __INTERNAL_OP_RSQRT_H__
namespace RSQRT
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t output_index;
-
- int32_t input_index;
+ int32_t output_index; /**< Index of output feature map */
+ int32_t input_index; /**< Index of input feature map */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file ReLU.h
+ * @brief This file contains accept function and params for ReLU operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_RELU_H__
#define __INTERNAL_OP_RELU_H__
namespace ReLU
{
+/**
+ * @brief Struct of ReLU operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output format index */
- int32_t ifm_index;
+ int32_t ifm_index; /**< Input format index */
+ /**
+ * @brief Construct a new Param object for ReLU as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for ReLU with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for ReLU
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for ReLU with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for ReLU
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for ReLU
+ * @return Parameters of ReLU
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for ReLU
+ * @param [in] v Node visitor for invoking visit function of ReLU
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file ReLU1.h
+ * @brief This file contains accept function and params for ReLU1 operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_RELU1_H__
#define __INTERNAL_OP_RELU1_H__
namespace ReLU1
{
+/**
+ * @brief Struct of ReLU1 operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output format index */
- int32_t ifm_index;
+ int32_t ifm_index; /**< Input format index */
+ /**
+ * @brief Construct a new Param object for ReLU1 as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for ReLU1 with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for ReLU1
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for ReLU1 with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for ReLU1
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for ReLU1
+ * @return Parameters of ReLU1
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for ReLU1
+ * @param [in] v Node visitor for invoking visit function of ReLU1
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file ReLU6.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::ReLU6 class
+ */
#ifndef __INTERNAL_OP_RELU6_H__
#define __INTERNAL_OP_RELU6_H__
namespace ReLU6
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t ifm_index; /**< Index of input feature map */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file ReduceMax.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::ReduceMax::Param struct
+ * and internal::tflite::op::ReduceMax::Node class
+ */
#ifndef __INTERNAL_OP_REDUCEMAX_H__
#define __INTERNAL_OP_REDUCEMAX_H__
namespace ReduceMax
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
- int32_t axis_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t ifm_index; /**< Index of input feature map */
+ int32_t axis_index; /**< Index of axis */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Reshape.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Reshape node
+ */
+
#ifndef __INTERNAL_OP_RESHAPE_H__
#define __INTERNAL_OP_RESHAPE_H__
namespace Reshape
{
+/**
+ * @brief Struct to manipulate parameter for Reshape operation
+ */
struct Param
{
- int32_t output_index;
+ int32_t output_index; //!< index for output feature map
- int32_t input_index;
- int32_t shape_index;
+ int32_t input_index; //!< index for input feature map
+ int32_t shape_index; //!< index for shape
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for input data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define Reshape Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Reshape Node object
+ * @param param Parameter for Reshape Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for Reshape node
};
} // namespace Reshape
* limitations under the License.
*/
+/**
+ * @file ResizeBilinear.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::ResizeBilinear::Param struct
+ * and internal::tflite::op::ResizeBilinear::Node class
+ */
#ifndef __INTERNAL_OP_RESIZE_BILINEAR_H__
#define __INTERNAL_OP_RESIZE_BILINEAR_H__
namespace ResizeBilinear
{
+/**
+ * @brief Struct to have indexes for ResizeBilinear operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t ifm_index;
- int32_t height_index;
- int32_t width_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t ifm_index; /**< Index of input feature map */
+ int32_t height_index; /**< Index of height */
+ int32_t width_index; /**< Index of width */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an ResizeBilinear operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Rnn.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines RNN node
+ */
+
#ifndef __INTERNAL_OP_RNN_H__
#define __INTERNAL_OP_RNN_H__
namespace RNN
{
+/**
+ * @brief Struct to manipulate parameter for RNN operation
+ */
struct Param
{
- int32_t output_index;
- int32_t hidden_state_out_index;
+ int32_t output_index; //!< index for output
+ int32_t hidden_state_out_index; //!< index for hidden state output
- int32_t input_index;
- int32_t weights_index;
- int32_t recurrent_weights_index;
- int32_t bias_index;
- int32_t hidden_state_in_index;
- int32_t fused_activation_index;
+ int32_t input_index; //!< index for input
+ int32_t weights_index; //!< index for weight
+ int32_t recurrent_weights_index; //!< index for recurrent weights
+ int32_t bias_index; //!< index for bias
+ int32_t hidden_state_in_index; //!< index for hidden state input
+ int32_t fused_activation_index; //!< index for fused activation
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for input data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define RNN Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new RNN Node object
+ * @param param Parameter for RNN Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for RNN node
};
} // namespace RNN
* limitations under the License.
*/
+/**
+ * @file Softmax.h
+ * @brief This file contains accept function and params for Softmax operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_SOFTMAX_H__
#define __INTERNAL_OP_SOFTMAX_H__
namespace Softmax
{
+/**
+ * @brief Struct of Softmax operation's param
+ */
struct Param
{
- int32_t output_index;
+ int32_t output_index; /**< Output index */
- int32_t input_index;
- int32_t scale_index;
+ int32_t input_index; /**< Input index */
+ int32_t scale_index; /**< Scale index */
+ /**
+ * @brief Construct a new Param object for Softmax as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for Softmax with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for Softmax
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for Softmax with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for Softmax
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for Softmax
+ * @return Parameters of Softmax
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for Softmax
+ * @param [in] v Node visitor for invoking visit function of Softmax
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file SpaceToDepth.h
+ * @brief This file contains accept function and params for SpaceToDepth operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_SPACETODEPTH_H__
#define __INTERNAL_OP_SPACETODEPTH_H__
namespace SpaceToDepth
{
+/**
+ * @brief Struct of SpaceToDepth operation's param
+ */
struct Param
{
- int32_t output_index;
+ int32_t output_index; /**< Output index */
- int32_t input_index;
- int32_t block_size_index;
+ int32_t input_index; /**< Input index */
+ int32_t block_size_index; /**< Block size index */
+ /**
+ * @brief Construct a new Param object for SpaceToDepth as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for SpaceToDepth with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for SpaceToDepth
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for SpaceToDepth with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for SpaceToDepth
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for SpaceToDepth
+ * @return Parameters of SpaceToDepth
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for SpaceToDepth
+ * @param [in] v Node visitor for invoking visit function of SpaceToDepth
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Split.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines Split node
+ */
+
#ifndef __INTERNAL_OP_SPLIT_H__
#define __INTERNAL_OP_SPLIT_H__
namespace Split
{
+/**
+ * @brief Struct to manipulate parameter for Split operation
+ */
struct Param
{
- int32_t axis_index;
- int32_t ifm_index;
+ int32_t axis_index; //!< index for axis
+ int32_t ifm_index; //!< index for input feature map
- std::vector<int32_t> ofm_indexes;
+ std::vector<int32_t> ofm_indexes; //!< index for output feature map
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for output data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define Split Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Split Node object
+ * @param param Parameter for Split Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for Split node
};
} // namespace Split
* limitations under the License.
*/
+/**
+ * @file SquaredDifference.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::SquaredDifference::Param struct
+ * and internal::tflite::op::SquaredDifference::Node class
+ */
#ifndef __INTERNAL_OP_SQUAREDDIFFERENCE_H__
#define __INTERNAL_OP_SQUAREDDIFFERENCE_H__
namespace SquaredDifference
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t ofm_index;
-
- int32_t lhs_index;
- int32_t rhs_index;
+ int32_t ofm_index; /**< Index of output feature map */
+ int32_t lhs_index; /**< Index of lhs */
+ int32_t rhs_index; /**< Index of rhs */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Squeeze.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines internal::tflite::op::Squeeze::Param struct
+ * and internal::tflite::op::Squeeze::Node class
+ */
#ifndef __INTERNAL_OP_SQUEEZE_H__
#define __INTERNAL_OP_SQUEEZE_H__
namespace Squeeze
{
+/**
+ * @brief Struct to have indexes for operation parameter
+ */
struct Param
{
- int32_t output_index;
-
- int32_t input_index;
- int32_t dims_index_optional = -1; // optional param. default is -1
+ int32_t output_index; /**< Index of output feature map */
+ int32_t input_index; /**< Index of input feature map */
+ // optional param. default is -1
+ int32_t dims_index_optional = -1; /**< Index of dims */
+ /**
+ * @brief Construct as default
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object with params
+ * @param[in] inputCount Count of inputs
+ * @param[in] inputs Pointer of inputs
+ * @param[in] outputCount Count of outputs
+ * @param[in] outputs Pointer of outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to represent an operation of data structure
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object with param
+ * @param[in] param Param object that makes up a Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destruct as default
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get a reference of Param object
+ * @return Reference of Param object
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Visit this Node by NodeVisitor
+ * @param[in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file StridedSlice.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines StridedSlice node
+ */
+
#ifndef __INTERNAL_OP_STRIDEDSLICE_H__
#define __INTERNAL_OP_STRIDEDSLICE_H__
namespace StridedSlice
{
+/**
+ * @brief Struct to manipulate parameter for StridedSlice operation
+ */
struct Param
{
- int32_t outputData_index;
+ int32_t outputData_index; //!< index for output data
- int32_t inputData_index;
- int32_t startData_index;
- int32_t endData_index;
- int32_t stridesData_index;
- int32_t beginMask_index;
- int32_t endMask_index;
- int32_t shrinkAxisMask_index;
+ int32_t inputData_index; //!< index for input data
+ int32_t startData_index; //!< index where slicing starts from
+ int32_t endData_index; //!< index where slicing ends at
+ int32_t stridesData_index; //!< index for stride value
+ int32_t beginMask_index; //!< index for beginmask
+ int32_t endMask_index; //!< index for endmask
+ int32_t shrinkAxisMask_index; //!< index for shrink axis
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for output data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define StridedSlice Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new StridedSlice Node object
+ * @param param Parameter for StridedSlice Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for StridedSlice node
};
} // namespace StridedSlice
* limitations under the License.
*/
+/**
+ * @file Sub.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines SUB Node
+ */
+
#ifndef __INTERNAL_OP_SUB_H__
#define __INTERNAL_OP_SUB_H__
namespace Sub
{
+/**
+ * @brief Struct to manipulate parameters for SUB
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; //!< index for output feature map
- int32_t lhs_index;
- int32_t rhs_index;
- int32_t activation_index;
+ int32_t lhs_index; //!< index for left-hand side
+ int32_t rhs_index; //!< index for right-hand side
+ int32_t activation_index; //!< index for activation function
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for output data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define SUB Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Sub Node object
+ * @param param Parameter for Sub Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for SUB node
};
} // namespace Sub
* limitations under the License.
*/
+/**
+ * @file Tanh.h
+ * @ingroup COM_AI_RUNTIME
+ * @brief This file defines TANH node
+ */
+
#ifndef __INTERNAL_OP_TANH_H__
#define __INTERNAL_OP_TANH_H__
namespace Tanh
{
+/**
+ * @brief Struct to manipulate parameter for hyperbolic tangent operation
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; //!< index for output feature map
- int32_t ifm_index;
+ int32_t ifm_index; //!< index for input feature map
+ /**
+ * @brief Default Constructor
+ */
Param() = default;
+ /**
+ * @brief Construct a new Param object
+ * @param[in] inputCount the number of inputs
+ * @param[in] inputs pointer for input data
+ * @param[in] outputCount the number of outputs
+ * @param[in] outputs pointer for input data
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define Tanh Operation
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Tanh Node object
+ * @param param Parameter for Tanh Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Default Destructor
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameter
+ * @return Param reference
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Accept a NodeVisitor so that it can visit this node
+ * @param [in] v Visitor
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
- const Param _param;
+ const Param _param; //!< parameter for Tanh node
};
} // namespace Tanh
* limitations under the License.
*/
+/**
+ * @file TopKV2.h
+ * @brief This file contains accept function and params for TopKV2 operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_TOPKV2_H__
#define __INTERNAL_OP_TOPKV2_H__
namespace TopKV2
{
+/**
+ * @brief Struct of TopKV2 operation's param
+ */
struct Param
{
- int32_t outputValues_index;
- int32_t outputIndices_index;
+ int32_t outputValues_index; /**< Output values index */
+ int32_t outputIndices_index; /**< Output indices index */
- int32_t inputData_index;
- int32_t k_index;
+ int32_t inputData_index; /**< Input data index */
+ int32_t k_index; /**< K value index */
+ /**
+ * @brief Construct a new Param object for TopKV2 as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for TopKV2 with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for TopKV2
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for TopKV2 with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for TopKV2
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for TopKV2
+ * @return Parameters of TopKV2
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for TopKV2
+ * @param [in] v Node visitor for invoking visit function of TopKV2
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file Transpose.h
+ * @brief This file contains accept function and params for Transpose operation
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __INTERNAL_OP_TRANSPOSE_H__
#define __INTERNAL_OP_TRANSPOSE_H__
namespace Transpose
{
+/**
+ * @brief Struct of Transpose operation's param
+ */
struct Param
{
- int32_t ofm_index;
+ int32_t ofm_index; /**< Output feature map index */
- int32_t ifm_index;
- int32_t permu_index;
+ int32_t ifm_index; /**< Input feature map index */
+ int32_t permu_index; /**< Permutation index */
+ /**
+ * @brief Construct a new Param object for Transpose as default
+ */
Param() = default;
+
+ /**
+ * @brief Construct a new Param object for Transpose with params
+ * @param [in] inputCount The number of input
+ * @param [in] inputs Array containing inputs
+ * @param [in] outputCount The number of output
+ * @param [in] outputs Array containing outputs
+ */
Param(uint32_t inputCount, const uint32_t *inputs, uint32_t outputCount, const uint32_t *outputs);
};
+/**
+ * @brief Class to define operation node for Transpose
+ */
class Node final : public op::Node
{
public:
+ /**
+ * @brief Construct a new Node object for Transpose with param
+ * @param [in] param Parameters for Node
+ */
Node(const Param ¶m) : _param(param)
{
// DO NOTHING
}
public:
+ /**
+ * @brief Destroy the Node object for Transpose
+ */
virtual ~Node() = default;
public:
+ /**
+ * @brief Get parameters for Transpose
+ * @return Parameters of Transpose
+ */
const Param ¶m(void) const { return _param; }
public:
+ /**
+ * @brief Function for accepting node for Transpose
+ * @param [in] v Node visitor for invoking visit function of Transpose
+ * @return N/A
+ */
void accept(NodeVisitor &&) const override;
private:
* limitations under the License.
*/
+/**
+ * @file logging.h
+ * @brief This file contains Context class for logging.
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __PURE_ARM_COMPUTE_LOGGING_H__
#define __PURE_ARM_COMPUTE_LOGGING_H__
namespace logging
{
+/**
+ * @brief class to define Context for logging
+ */
class Context
{
public:
+ /**
+ * @brief Construct default
+ */
Context() : _enabled{false}
{
auto env = std::getenv("PURE_ARM_COMPUTE_LOG_ENABLE");
}
public:
+ /**
+ * @brief Get @c true if PURE_ARM_COMPUTE_LOG_ENABLE has been set as environment value, otherwise
+ * @c false
+ * @return @c true if PURE_ARM_COMPUTE_LOG_ENABLE has been set as environment value, otherwise @c
+ * false
+ */
bool enabled(void) const { return _enabled; }
private:
bool _enabled;
};
+/**
+ * @brief static Context class for logging
+ */
static Context ctx;
} // namespace logging
* limitations under the License.
*/
+/**
+ * @file memory.h
+ * @brief This file defines ANeuralNetworksMemory class for handling Memory NNAPI
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __MEMORY_H__
#define __MEMORY_H__
#include <cstdint>
+/**
+ * @brief struct to define Memory NNAPI
+ */
struct ANeuralNetworksMemory
{
public:
+ /**
+ * @brief Constructor with params
+ * @param [in] size The requested size in bytes
+ * @param [in] protect The desired memory protection for the mapping
+ * @param [in] fd The requested file descriptor
+ * @param [in] offset The offset to the beginning of the file of the area to map
+ */
ANeuralNetworksMemory(size_t size, int protect, int fd, size_t offset);
+ /**
+ * @brief Destructor
+ */
~ANeuralNetworksMemory();
public:
+ /**
+ * @brief Get size
+ * @return size
+ */
size_t size(void) const { return _size; }
+ /**
+ * @brief Get base pointer
+ * @return base pointer
+ */
uint8_t *base(void) { return _base; }
+ /**
+ * @brief Get base pointer
+ * @return const base pointer
+ */
const uint8_t *base(void) const { return _base; }
private:
* limitations under the License.
*/
+/**
+ * @file model.h
+ * @brief This file contains ANeuralNetworksModel class for handling Model NNAPI such as
+ * ANeuralNetworksModel_create, ANeuralNetworksModel_addOperand
+ * @ingroup COM_AI_RUNTIME
+ */
+
#ifndef __MODEL_H__
#define __MODEL_H__
#include "internal/Model.h"
+/**
+ * @brief struct to express Model of NNAPI
+ */
struct ANeuralNetworksModel
{
public:
+ /**
+ * @brief Construct without params
+ */
ANeuralNetworksModel();
public:
+ /**
+ * @brief Get reference of internal::tflite::Model
+ * @return Reference of internal::tflite::Model
+ */
internal::tflite::Model &deref(void) { return *_model; }
public:
+ /**
+ * @brief Release the internal::tflite::Model pointer into the given parameter
+ * @param [out] model Shared pointer that receives the internal::tflite::Model pointer
+ * @return N/A
+ */
void release(std::shared_ptr<const internal::tflite::Model> &model) { model = _model; }
+ /**
+ * @brief Get @c true if ANeuralNetworksModel_finish has been called, otherwise @c false
+ * @return @c true if ANeuralNetworksModel_finish has been called, otherwise @c false
+ */
bool isFinished() { return _isFinished == true; }
+ /**
+ * @brief Mark model process finished
+ * @return N/A
+ */
void markAsFinished() { _isFinished = true; }
private: