{
/**
- * @brief Class to read value of feature which is inherited from nnfw::util::feature::Reader<T> class
+ * @brief Class to read value of feature which is inherited
+ * from nnfw::util::feature::Reader<T> class
*/
-template<typename T> class Reader : public nnfw::util::feature::Reader<T>
+template <typename T> class Reader : public nnfw::util::feature::Reader<T>
{
public:
/**
* @param[in] shape The shape of a feature
* @param[in] base The base address of a feature
*/
- Reader(const nnfw::util::feature::Shape &shape, const T *base)
- : _shape{shape}, _base{base}
+ Reader(const nnfw::util::feature::Shape &shape, const T *base) : _shape{shape}, _base{base}
{
// DO NOTHING
}
}
private:
- nnfw::util::feature::Shape _shape; /**< Shape of feature */
- const T *_base; /**< Base address of feature */
+ nnfw::util::feature::Shape _shape; /**< Shape of feature */
+ const T *_base; /**< Base address of feature */
};
} // namespace feature
#define STR_DETAIL(value) #value
#define STR(value) STR_DETAIL(value)
-#define TFLITE_ENSURE(exp) { \
- const TfLiteStatus status = (exp); \
- \
- if (status != kTfLiteOk) \
- { \
- std::ostringstream ss; \
- ss << #exp << " failed (" << __FILE__ << ":" << __LINE__ << ")"; \
- throw std::runtime_error{ss.str()}; \
- } \
-}
+#define TFLITE_ENSURE(exp) \
+ { \
+ const TfLiteStatus status = (exp); \
+ \
+ if (status != kTfLiteOk) \
+ { \
+ std::ostringstream ss; \
+ ss << #exp << " failed (" << __FILE__ << ":" << __LINE__ << ")"; \
+ throw std::runtime_error{ss.str()}; \
+ } \
+ }
#endif // __NNFW_SUPPORT_TFLITE_ASSERT_H__
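// A minimal usage sketch for TFLITE_ENSURE, assuming a ::tflite::Interpreter named
// `interpreter` has been built elsewhere: any call that does not return kTfLiteOk
// throws std::runtime_error carrying the failing expression, file, and line.
//
//   TFLITE_ENSURE(interpreter->AllocateTensors());
//   TFLITE_ENSURE(interpreter->Invoke());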
 * @param[in] comparator Comparator object for tensor comparison
*/
TfLiteInterpMatchApp(const nnfw::util::tensor::Comparator &comparator)
- : _verbose{false}, _comparator(comparator)
+ : _verbose{false}, _comparator(comparator)
{
// DO NOTHING
}
*/
template <typename T>
bool compareSingleTensorView(const nnfw::support::tflite::TensorView<T> &expected,
- const nnfw::support::tflite::TensorView<T> &obtained,
- int id) const;
+ const nnfw::support::tflite::TensorView<T> &obtained, int id) const;
private:
const nnfw::util::tensor::Comparator &_comparator;
* @brief Generate random numbers for type T
* @return Random generated value
*/
- template <typename T> T generate(void)
- {
- return _dist(_rand);
- }
+ template <typename T> T generate(void) { return _dist(_rand); }
private:
std::minstd_rand _rand;
const TfLiteQuantizationParams _quantization;
};
-template <>
-uint8_t RandomGenerator::generate<uint8_t>(void);
+template <> uint8_t RandomGenerator::generate<uint8_t>(void);
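// A minimal sketch of generate<T>(), assuming a RandomGenerator instance `gen`
// constructed elsewhere: the generic template draws from the internal distribution,
// while the uint8_t specialization declared above covers quantized tensor data.
//
//   float f = gen.generate<float>();
//   uint8_t q = gen.generate<uint8_t>();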
/**
* @brief Structure for NNAPI correctness test
namespace tflite
{
-template<typename T> class FeatureView;
+template <typename T> class FeatureView;
/**
* @brief Class to support reading element of float type feature
*/
-template<> class FeatureView<float> : public nnfw::util::feature::Reader<float>
+template <> class FeatureView<float> : public nnfw::util::feature::Reader<float>
{
public:
/**
}
private:
- ::tflite::Interpreter * const _interp;
+ ::tflite::Interpreter *const _interp;
};
} // namespace tflite
* @brief Run the Invoke function of NNAPI delegate
* @return @c true if Invoke() is successful, otherwise @c false
*/
- bool run(void) override
- {
- return kTfLiteOk == _delegate.Invoke(_interp);
- }
+ bool run(void) override { return kTfLiteOk == _delegate.Invoke(_interp); }
/**
* @brief Tear down TfLite interpreter session
}
private:
- ::tflite::Interpreter * const _interp;
+ ::tflite::Interpreter *const _interp;
nnfw::NNAPIDelegate _delegate;
};
*/
class TensorLogger
{
- private:
- std::ofstream _outfile;
-
- public:
- /**
- * @brief Get TensorLogger instance
- * @return The TensorLogger instance
- */
- static TensorLogger &instance()
+private:
+ std::ofstream _outfile;
+
+public:
+ /**
+ * @brief Get TensorLogger instance
+ * @return The TensorLogger instance
+ */
+ static TensorLogger &instance()
+ {
+ static TensorLogger instance;
+ return instance;
+ }
+
+ /**
+ * @brief Save the tensor details to file from interpreter
+ * @param[in] path The file path to save
+ * @param[in] interp The TfLite interpreter
+ */
+ void save(const std::string &path, ::tflite::Interpreter &interp)
+ {
+ open(path);
+
+ int log_index = 0;
+ for (const auto id : interp.inputs())
{
- static TensorLogger instance;
- return instance;
+ _outfile << "# input tensors" << std::endl;
+ printTensor(interp, id, log_index++);
}
-
- /**
- * @brief Save the tensor details to file from interpreter
- * @param[in] path The file path to save
- * @param[in] interp The TfLite interpreter
- */
- void save(const std::string &path, ::tflite::Interpreter &interp)
+ for (const auto id : interp.outputs())
{
- open(path);
-
- int log_index = 0;
- for (const auto id : interp.inputs())
- {
- _outfile << "# input tensors" << std::endl;
- printTensor(interp, id, log_index++);
- }
- for (const auto id : interp.outputs())
- {
- _outfile << "# output tensors" << std::endl;
- printTensor(interp, id, log_index++);
- }
- close();
+ _outfile << "# output tensors" << std::endl;
+ printTensor(interp, id, log_index++);
}
-
- private:
-
- void open(const std::string &path)
+ close();
+ }
+
+private:
+ void open(const std::string &path)
+ {
+ if (!_outfile.is_open())
+ _outfile.open(path, std::ios_base::out);
+
+ _outfile << "# ------ file: " << path << " ------" << std::endl
+ << "tensor_shape_gen = []" << std::endl
+ << "tensor_value_gen = []" << std::endl
+ << std::endl;
+ }
+
+ void printTensor(::tflite::Interpreter &interp, const int id, const int log_index)
+ {
+ const TfLiteTensor *tensor = interp.tensor(id);
+
+ _outfile << "# tensor name: " << tensor->name << std::endl;
+ _outfile << "# tflite::interpreter.tensor(" << id << ") -> "
+ "tensor_value_gen["
+ << log_index << "]" << std::endl;
+
+ if (tensor->type == kTfLiteInt32)
{
- if (! _outfile.is_open())
- _outfile.open(path, std::ios_base::out);
-
- _outfile << "# ------ file: " << path << " ------" << std::endl
- << "tensor_shape_gen = []" << std::endl
- << "tensor_value_gen = []" << std::endl << std::endl;
+ printTensorShape(tensor);
+ printTensorValue<int32_t>(tensor, tensor->data.i32);
}
-
- void printTensor(::tflite::Interpreter &interp, const int id, const int log_index)
+ else if (interp.tensor(id)->type == kTfLiteUInt8)
{
- const TfLiteTensor* tensor = interp.tensor(id);
-
- _outfile << "# tensor name: " << tensor->name << std::endl;
- _outfile << "# tflite::interpreter.tensor("<< id <<") -> "
- "tensor_value_gen["<< log_index << "]" << std::endl;
-
- if (tensor->type == kTfLiteInt32)
- {
- printTensorShape(tensor);
- printTensorValue<int32_t>(tensor, tensor->data.i32);
- }
- else if (interp.tensor(id)->type == kTfLiteUInt8)
- {
- printTensorShape(tensor);
- printTensorValue<uint8_t>(tensor, tensor->data.uint8);
- }
- else if (tensor->type == kTfLiteFloat32)
- {
- printTensorShape(tensor);
- printTensorValue<float>(tensor, tensor->data.f);
- }
+ printTensorShape(tensor);
+ printTensorValue<uint8_t>(tensor, tensor->data.uint8);
}
-
- void printTensorShape(const TfLiteTensor* tensor)
+ else if (tensor->type == kTfLiteFloat32)
{
- _outfile << "tensor_shape_gen.append('{";
+ printTensorShape(tensor);
+ printTensorValue<float>(tensor, tensor->data.f);
+ }
+ }
- size_t r = 0;
- for (; r < tensor->dims->size - 1; r++)
- {
- _outfile << tensor->dims->data[r]
- << ", ";
- }
- _outfile << tensor->dims->data[r];
+ void printTensorShape(const TfLiteTensor *tensor)
+ {
+ _outfile << "tensor_shape_gen.append('{";
- _outfile << "}')" << std::endl;
+ size_t r = 0;
+ for (; r < tensor->dims->size - 1; r++)
+ {
+ _outfile << tensor->dims->data[r] << ", ";
}
+ _outfile << tensor->dims->data[r];
- template <typename T>
- void printTensorValue(const TfLiteTensor* tensor, T* tensor_data_ptr)
- {
- _outfile << "tensor_value_gen.append([";
+ _outfile << "}')" << std::endl;
+ }
- _outfile << std::fixed << std::setprecision(10);
+ template <typename T> void printTensorValue(const TfLiteTensor *tensor, T *tensor_data_ptr)
+ {
+ _outfile << "tensor_value_gen.append([";
- const T *end = reinterpret_cast<const T *>(tensor->data.raw_const + tensor->bytes);
- for (T *ptr = tensor_data_ptr; ptr < end; ptr++)
- _outfile << *ptr << ", ";
+ _outfile << std::fixed << std::setprecision(10);
- _outfile << "])" << std::endl << std::endl;
- }
+ const T *end = reinterpret_cast<const T *>(tensor->data.raw_const + tensor->bytes);
+ for (T *ptr = tensor_data_ptr; ptr < end; ptr++)
+ _outfile << *ptr << ", ";
- void close()
- {
- _outfile << "# --------- tensor shape and value defined above ---------" << std::endl;
- _outfile.close();
- }
+ _outfile << "])" << std::endl << std::endl;
+ }
+
+ void close()
+ {
+ _outfile << "# --------- tensor shape and value defined above ---------" << std::endl;
+ _outfile.close();
+ }
};
} // namespace tflite
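// A minimal usage sketch for TensorLogger, assuming the enclosing namespace is
// nnfw::support::tflite and `interp` is a prepared ::tflite::Interpreter: the
// singleton writes each input and output tensor as a small Python script that
// appends shapes to tensor_shape_gen and values to tensor_value_gen.
//
//   nnfw::support::tflite::TensorLogger::instance().save("tensor_dump.py", interp);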
* @param[in] tensor The tensor object to be compared
* @return @c true if tensor type is kTfLiteFloat32, otherwise @c false
*/
-inline bool isFloatTensor(const TfLiteTensor *tensor)
-{
- return tensor->type == kTfLiteFloat32;
-}
+inline bool isFloatTensor(const TfLiteTensor *tensor) { return tensor->type == kTfLiteFloat32; }
/**
- * @brief Get @c true if tensor is 4-D tensor and the first dimension length is 1, otherwise @c false
+ * @brief Get @c true if tensor is 4-D tensor and the first dimension length is 1,
+ * otherwise @c false
* @param[in] tensor The tensor object to be compared
* @return @c true if tensor is 4-D tensor and the first dimension length is 1, otherwise @c false
*/
/**
* @brief Class to define TensorView which is inherited from nnfw::util::tensor::Reader<T> class
*/
-template<typename T> class TensorView final : public nnfw::util::tensor::Reader<T>
+template <typename T> class TensorView final : public nnfw::util::tensor::Reader<T>
{
public:
/**
* @param[in] shape The shape of a tensor
* @param[in] base The base address of a tensor
*/
- TensorView(const nnfw::util::tensor::Shape &shape, T *base)
- : _shape{shape}, _base{base}
+ TensorView(const nnfw::util::tensor::Shape &shape, T *base) : _shape{shape}, _base{base}
{
// Set 'stride'
_stride.init(_shape);
nnfw::util::tensor::Shape _shape; /**< The tensor shape */
public:
- T *_base; /**< The base address of tensor */
- nnfw::util::tensor::NonIncreasingStride _stride; /**< The NonIncreasingStride object */
+ T *_base; /**< The base address of tensor */
+ nnfw::util::tensor::NonIncreasingStride _stride; /**< The NonIncreasingStride object */
public:
// TODO Introduce Operand ID class
class FunctionBuilder final : public Builder
{
public:
- using SetupFunc = std::function<void (::tflite::Interpreter &)>;
+ using SetupFunc = std::function<void(::tflite::Interpreter &)>;
public:
/**
namespace Abs
{
- void *InitAbs(TfLiteContext *context, const char *buffer, size_t length);
- void FreeAbs(TfLiteContext *context, void *buffer);
- TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node);
- TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node);
+void *InitAbs(TfLiteContext *context, const char *buffer, size_t length);
+void FreeAbs(TfLiteContext *context, void *buffer);
+TfLiteStatus PrepareAbs(TfLiteContext *context, TfLiteNode *node);
+TfLiteStatus EvalAbs(TfLiteContext *context, TfLiteNode *node);
} // namespace Abs
} // namespace nnfw
namespace nnfw
{
-#define REGISTER_FUNCTION(Name) \
- TfLiteRegistration *Register_##Name(void) \
- { \
- static TfLiteRegistration r = { Name::Init##Name , Name::Free##Name , Name::Prepare##Name , \
- Name::Eval##Name ,}; \
- r.custom_name = #Name; \
- return &r; \
+#define REGISTER_FUNCTION(Name) \
+ TfLiteRegistration *Register_##Name(void) \
+ { \
+ static TfLiteRegistration r = { \
+ Name::Init##Name, Name::Free##Name, Name::Prepare##Name, Name::Eval##Name, \
+ }; \
+ r.custom_name = #Name; \
+ return &r; \
}
REGISTER_FUNCTION(TensorFlowMax)
#undef REGISTER_FUNCTION
-} // namespace nnfw
-} // namespace custom
-} // namespace ops
-} // namespace tflite
+} // namespace nnfw
+} // namespace custom
+} // namespace ops
+} // namespace tflite
#endif // __NNFW_SUPPORT_TFLITE_KERNELS_CUSTOM_OP_H__
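// A minimal registration sketch, assuming the contrib-era TensorFlow Lite resolver
// API: Register_TensorFlowMax(), generated by REGISTER_FUNCTION above, returns a
// TfLiteRegistration that can be attached to a resolver under the op's custom name.
//
//   ::tflite::MutableOpResolver resolver;
//   resolver.AddCustom("TensorFlowMax",
//                      ::tflite::ops::custom::nnfw::Register_TensorFlowMax());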
/**
* @file SquaredDifference.h
- * @brief This file contains SquaredDifference namespace and SquaredDifference function definitions
+ * @brief This file contains SquaredDifference namespace and SquaredDifference function
+ * definitions
* @ingroup COM_AI_RUNTIME
*/
namespace SquaredDifference
{
- /**
- * @brief Initialize SquaredDifference operand using the contents of buffer
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @param[in] length The buffer length
- * @return The void pointer for user data
- */
- void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length);
+/**
+ * @brief Initialize SquaredDifference operand using the contents of buffer
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @param[in] length The buffer length
+ * @return The void pointer for user data
+ */
+void *InitSquaredDifference(TfLiteContext *context, const char *buffer, size_t length);
- /**
- * @brief Release any memory it might have allocated via 'InitSquaredDifference'
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @return N/A
- */
- void FreeSquaredDifference(TfLiteContext *context, void *buffer);
+/**
+ * @brief Release any memory it might have allocated via 'InitSquaredDifference'
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @return N/A
+ */
+void FreeSquaredDifference(TfLiteContext *context, void *buffer);
- /**
- * @brief Prepare the SquaredDifference operand for execution
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
- TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node);
+/**
+ * @brief Prepare the SquaredDifference operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus PrepareSquaredDifference(TfLiteContext *context, TfLiteNode *node);
- /**
- * @brief Evaluation the SquaredDifference operand for execution
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
- TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node);
+/**
+ * @brief Evaluate the SquaredDifference operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus EvalSquaredDifference(TfLiteContext *context, TfLiteNode *node);
} // namespace SquaredDifference
} // namespace nnfw
namespace TensorFlowMax
{
- /**
- * @brief Initialize TensorFlowMax operand using the contents of buffer
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @param[in] length The buffer length
- * @return The void pointer for user data
- */
- void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length);
+/**
+ * @brief Initialize TensorFlowMax operand using the contents of buffer
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @param[in] length The buffer length
+ * @return The void pointer for user data
+ */
+void *InitTensorFlowMax(TfLiteContext *context, const char *buffer, size_t length);
- /**
- * @brief Release any memory it might have allocated via 'InitTensorFlowMax'
- * @param[in] context The TfLite context
- * @param[in] buffer The buffer with contents
- * @return N/A
- */
- void FreeTensorFlowMax(TfLiteContext *context, void *buffer);
+/**
+ * @brief Release any memory it might have allocated via 'InitTensorFlowMax'
+ * @param[in] context The TfLite context
+ * @param[in] buffer The buffer with contents
+ * @return N/A
+ */
+void FreeTensorFlowMax(TfLiteContext *context, void *buffer);
- /**
- * @brief Prepare the TensorFlowMax operand for execution
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
- TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
+/**
+ * @brief Prepare the TensorFlowMax operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus PrepareTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
- /**
- * @brief Evaluation the TensorFlowMax operand for execution
- * @param[in] context The TfLite context
- * @param[in] node The operand node
- * @return The TfLite status
- */
- TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
+/**
+ * @brief Evaluate the TensorFlowMax operand for execution
+ * @param[in] context The TfLite context
+ * @param[in] node The operand node
+ * @return The TfLite status
+ */
+TfLiteStatus EvalTensorFlowMax(TfLiteContext *context, TfLiteNode *node);
} // namespace TensorFlowMax
} // namespace nnfw
namespace TensorFlowSum
{
- void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t length);
- void FreeTensorFlowSum(TfLiteContext *context, void *buffer);
- TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
- TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
+void *InitTensorFlowSum(TfLiteContext *context, const char *buffer, size_t length);
+void FreeTensorFlowSum(TfLiteContext *context, void *buffer);
+TfLiteStatus PrepareTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
+TfLiteStatus EvalTensorFlowSum(TfLiteContext *context, TfLiteNode *node);
} // namespace TensorFlowSum
} // namespace nnfw
limitations under the License.
==============================================================================*/
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
// NOTE This header is derived from the following file (in TensorFlow)
// 'externals/tensorflow/tensorflow/contrib/lite/kernels/register.h'
#ifndef __NNFW_SUPPORT_TFLITE_KERNELS_REGISTER_H__
} // namespace tflite
#endif // __NNFW_SUPPORT_TFLITE_KERNELS_REGISTER_H__
+
+// clang-format on
#include <cstdlib>
#include <string>
-
namespace nnfw
{
namespace util
* @param[in] row The height index
* @param[in] col The width index
*/
- Index(int32_t batch, int32_t ch, int32_t row, int32_t col) : _batch{batch}, _ch{ch}, _row{row}, _col{col}
+ Index(int32_t batch, int32_t ch, int32_t row, int32_t col)
+ : _batch{batch}, _ch{ch}, _row{row}, _col{col}
{
// DO NOTHING
}
* @param[in] height The height value
* @param[in] width The width value
*/
- Shape(int32_t batch, int32_t depth, int32_t height, int32_t width) : N{batch}, C{depth}, H{height}, W{width}
+ Shape(int32_t batch, int32_t depth, int32_t height, int32_t width)
+ : N{batch}, C{depth}, H{height}, W{width}
{
// DO NOTHING
}
}
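// A minimal construction sketch for the two types above (the enclosing namespace is
// assumed to be nnfw::util::feature): a 4-D NCHW shape and an element index in it.
//
//   nnfw::util::feature::Shape shape{1, 32, 112, 112}; // batch, depth, height, width
//   nnfw::util::feature::Index index{0, 3, 10, 20};    // batch, ch, row, col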
/**
- * @brief Verify that an obtained float value is equal to the expected float value by using FLT_EPSILON
+ * @brief Verify that an obtained float value is equal to the expected float value
+ * by using FLT_EPSILON
* @param[in] expected An expected float value to be compared
* @param[in] obtained An obtained float value to be compared
* @param[in] tolerance A tolerance value
}
/**
- * @brief Verify that an obtained float value is equal to the expected float value by comparing absolute tolerance value
+ * @brief Verify that an obtained float value is equal to the expected float value
+ * by comparing against an absolute tolerance value
* @param[in] expected An expected float value to be compared
* @param[in] obtained An obtained float value to be compared
* @param[in] tolerance A tolerance value
*/
struct Shape
{
- int32_t N; /**< The kernel index */
- int32_t C; /**< The channel index */
- int32_t H; /**< The height index */
- int32_t W; /**< The width index */
+  int32_t N; /**< The number of kernels */
+  int32_t C; /**< The number of channels */
+  int32_t H; /**< The height of each kernel */
+  int32_t W; /**< The width of each kernel */
/**
* @brief Construct a new Shape object as default
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This header is derived from the following file (in TensorFlow v1.12)
+// 'externals/tensorflow/tensorflow/contrib/lite/profiling/profile_buffer.h'
#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_BUFFER_H_
#define TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_BUFFER_H_
} // namespace tflite
#endif // TFLITE_PROFILING_ENABLED
#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILE_BUFFER_H_
+
+// clang-format on
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This header is derived from the following file (in TensorFlow v1.12)
+// 'externals/tensorflow/tensorflow/contrib/lite/profiling/profiler.h'
#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILER_H_
#define TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILER_H_
#endif // TFLITE_PROFILING_ENABLED
#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_PROFILER_H_
+
+// clang-format on
#include <iostream>
-namespace tflite {
-namespace profiling {
+namespace tflite
+{
+namespace profiling
+{
class Profiler; // forward declaration
}
}
public:
const Sync &sync(void) const { return _sync; }
- tflite::profiling::Profiler* getProfiler() { return _profiler; }
- void setProfiler(tflite::profiling::Profiler* p) { _profiler = p; }
+ tflite::profiling::Profiler *getProfiler() { return _profiler; }
+ void setProfiler(tflite::profiling::Profiler *p) { _profiler = p; }
private:
Sync _sync;
- tflite::profiling::Profiler* _profiler;
+ tflite::profiling::Profiler *_profiler;
public:
static Context &get(void)
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
+
+// NOTE To minimize diff with upstream tensorflow, disable clang-format
+// clang-format off
+
+// NOTE This header is derived from the following file (in TensorFlow v1.12)
+// 'externals/tensorflow/tensorflow/contrib/lite/profiling/time.h'
#ifndef TENSORFLOW_CONTRIB_LITE_PROFILING_TIME_H_
#define TENSORFLOW_CONTRIB_LITE_PROFILING_TIME_H_
} // namespace profiling
} // namespace tflite
#endif // TENSORFLOW_CONTRIB_LITE_PROFILING_TIME_H_
+
+// clang-format on
* @brief Construct a new @c Comparator object
* @param[in] fn Function that compares two float values
*/
- Comparator(const std::function<bool (float lhs, float rhs)> &fn) : _compare_fn{fn}
+ Comparator(const std::function<bool(float lhs, float rhs)> &fn) : _compare_fn{fn}
{
// DO NOTHING
}
 * @return @c std::vector<Diff<float>> containing information about failed comparisons
*/
// NOTE Observer should live longer than comparator
- std::vector<Diff<float>> compare(const Shape &shape,
- const Reader<float> &expected,
- const Reader<float> &obtained,
- Observer *observer = nullptr) const;
+ std::vector<Diff<float>> compare(const Shape &shape, const Reader<float> &expected,
+ const Reader<float> &obtained,
+ Observer *observer = nullptr) const;
private:
- std::function<bool (float lhs, float rhs)> _compare_fn;
+ std::function<bool(float lhs, float rhs)> _compare_fn;
};
} // namespace tensor
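// A minimal usage sketch for Comparator, assuming a tensor Shape `shape` and two
// Reader<float> views `expected` and `obtained` built elsewhere: the predicate is
// supplied as a lambda, and compare() returns the elements for which it fails.
//
//   nnfw::util::tensor::Comparator comparator{
//       [](float lhs, float rhs) { return std::fabs(lhs - rhs) <= FLT_EPSILON; }};
//   const auto diffs = comparator.compare(shape, expected, obtained);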
/**
* @brief Struct to have information after comparing two elements of two tensors
*/
-template<typename T> struct Diff
+template <typename T> struct Diff
{
Index index; /**< Index of elements in two tensors, which turn out to be different */
size_t rank = origin.rank();
Index target(rank);
for (int i = 0; i < rank; i++)
- target.at(i) = origin.at(rank-1 -i);
+ target.at(i) = origin.at(rank - 1 - i);
return target;
}
const size_t rank = _shape.rank();
// Find axis to be updated
- while((_cursor < rank) && !(_index.at(_cursor) + 1 < _shape.dim(_cursor)))
+ while ((_cursor < rank) && !(_index.at(_cursor) + 1 < _shape.dim(_cursor)))
{
++_cursor;
}
- if(_cursor == rank)
+ if (_cursor == rank)
{
return;
}