#ifdef DEBUG
std::cout << "input: " << context.getInput(SINGLE_INOUT_IDX);
std::cout << "output: " << context.getOutput(SINGLE_INOUT_IDX);
- PowUtil::pause();
+ /// PowUtil::pause();
#endif
}
#ifdef DEBUG
std::cout << "input: " << context.getOutput(SINGLE_INOUT_IDX);
std::cout << "output: " << context.getInput(SINGLE_INOUT_IDX);
- PowUtil::pause();
+ /// PowUtil::pause();
#endif
}
export DEB_HOST_ARCH ?= $(shell dpkg-architecture -qDEB_HOST_ARCH)
ifdef unit_test
export ENABLE_REDUCE_TOLERANCE ?= false
+ export ENABLE_DEBUG ?= true
else
export ENABLE_REDUCE_TOLERANCE ?= true
+ export ENABLE_DEBUG ?= false
endif
%:
meson --buildtype=plain --prefix=/usr --sysconfdir=/etc \
--libdir=lib/$(DEB_HOST_MULTIARCH) --bindir=lib/nntrainer/bin \
--includedir=include -Dinstall-app=true \
- -Dreduce-tolerance=$(ENABLE_REDUCE_TOLERANCE) build
+ -Dreduce-tolerance=$(ENABLE_REDUCE_TOLERANCE) \
+ -Denable-debug=$(ENABLE_DEBUG) build
override_dh_auto_build:
ninja -C build
add_project_arguments('-DPROFILE=1', language:['c', 'cpp'])
endif
+if get_option('enable-debug')
+ add_project_arguments('-DDEBUG=1', language:['c', 'cpp'])
+endif
+
if get_option('use_gym')
add_project_arguments('-DUSE_GYM=1', language:['c','cpp'])
endif
option('enable-tflite-backbone', type: 'boolean', value: true)
option('enable-android', type: 'boolean', value: false)
option('enable-profile', type: 'boolean', value: false)
+option('enable-debug', type: 'boolean', value: false)
option('enable-tflite-interpreter', type: 'boolean', value: true)
# dependency conflict resolution
if (old_batch != new_batch && num_observed == new_batch) {
#if DEBUG
NNTR_THROW_IF_CLEANUP(iq->empty_mutex.try_lock(), std::runtime_error,
- [iq] { iq->empty_mutex.unlock(); })
+ [this] { iq->empty_mutex.unlock(); })
<< "iteration queue must be locked already but empty_mutex is not "
"locked.";
#endif
* references which leads to nasty bugs. This validation ensures that the
* tensors are not set mistakenly by verifying their unique names
*/
-#ifdef ENABLE_TEST
+#ifdef DEBUG
if (tensor_map.empty() || !tensor_map[inputs[0]->getName()]) {
auto filler = [this](const auto &vec) {
for (auto const &val : vec) {
};
auto matcher_w = [this, matcher](const std::vector<Weight *> &vec) {
- auto ret = true;
- for (auto const &val : vec)
- ret &= matcher(val);
- return ret;
+ return std::all_of(vec.begin(), vec.end(), matcher);
};
auto matcher_vw = [this, matcher](const std::vector<Var_Grad *> &vec,
bool skip_grad = false) {
- auto ret = true;
- for (auto const &val : vec)
- ret &= matcher(val, skip_grad);
+ return std::all_of(vec.begin(), vec.end(),
+ std::bind(matcher, std::placeholders::_1, skip_grad));
std::vector<Var_Grad *> outputs; /**< outputs of the layer */
std::vector<Var_Grad *> tensors; /**< tensors of the layer */
-#ifdef ENABLE_TEST
+#ifdef DEBUG
std::map<std::string, const void *>
tensor_map; /**< map of tensor name to tensor address */
#endif
layer->forwarding(*run_context, training);
END_PROFILE(forward_event_key);
-#ifdef ENABLE_TEST
+#ifdef DEBUG
if (!run_context->validate(getNumInputConnections() == 0, !requireLabel()))
throw std::runtime_error("Running forwarding() layer " + getName() +
" invalidated the context.");
layer->calcDerivative(*run_context);
END_PROFILE(calc_deriv_event_key);
-#ifdef ENABLE_TEST
+#ifdef DEBUG
if (!run_context->validate(getNumInputConnections() == 0, !requireLabel()))
throw std::runtime_error("Running calcDerivative() layer " + getName() +
" invalidated the context.");
layer->calcGradient(*run_context);
END_PROFILE(calc_grad_event_key);
-#ifdef ENABLE_TEST
+#ifdef DEBUG
if (!run_context->validate(getNumInputConnections() == 0, !requireLabel()))
throw std::runtime_error("Running calcGradient() layer " + getName() +
" invalidated the context.");
#ifdef DEBUG
std::vector<TensorDim> out_tf_dim;
setDimensions(interpreter->outputs(), out_tf_dim, true);
- if (out_tf_dim.size() != output_dim.size()) {
+ if (out_tf_dim.size() != context.getNumOutputs()) {
throw std::invalid_argument(
"[TfliteLayer::forward] number of output dimension does not match");
}
for (unsigned int i = 0; i < out_tf_dim.size(); ++i) {
- if (output_dim[i] != out_tf_dim[i]) {
- throw std::invalid_argumetns(
+ if (context.getOutput(i).getDim() != out_tf_dim[i]) {
+ throw std::invalid_argument(
"[TfliteLayer::forward] output dimension does not match");
}
}
*/
#include <basic_planner.h>
+#include <nntrainer_error.h>
namespace nntrainer {
%define enable_profile -Denable-profile=false
%define capi_ml_pkg_dep_resolution -Dcapi-ml-inference-actual=%{?capi_ml_inference_pkg_name} -Dcapi-ml-common-actual=%{?capi_ml_common_pkg_name}
%define enable_reduce_tolerance -Dreduce-tolerance=true
+%define enable_debug -Denable-debug=false
# enable full tolerance on the CI
%if 0%{?unit_test}
%define enable_reduce_tolerance -Dreduce-tolerance=false
%endif
+# enable debug on the CI for build
+%if 0%{?unit_test}
+%define enable_debug -Denable-debug=true
+%endif
+
%if %{with tizen}
%define platform -Dplatform=tizen
%{enable_gym} %{enable_nnstreamer_tensor_filter} %{enable_profile} \
%{enable_nnstreamer_backbone} %{enable_tflite_backbone} \
%{enable_tflite_interpreter} %{capi_ml_pkg_dep_resolution} \
- %{enable_reduce_tolerance} build
+ %{enable_reduce_tolerance} %{enable_debug} build
ninja -C build %{?_smp_mflags}