// buffer. So if training is false, the output is the same as the input. In
// other words, nothing happens during inference.
- if (training && rate_ > epsilon) {
- for (unsigned int i = 0; i < context.getNumInputs(); ++i) {
- Tensor &input_ = context.getInput(i);
- Tensor &output_ = context.getOutput(i);
- Tensor &mask_ = context.getTensor(mask_idx[i]);
+ for (unsigned int i = 0; i < context.getNumInputs(); ++i) {
+ Tensor &input_ = context.getInput(i);
+ Tensor &output_ = context.getOutput(i);
+ /** @todo make this in-place */
+ if (training && rate_ > epsilon) {
+ Tensor &mask_ = context.getTensor(mask_idx[i]);
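+      /* NOTE: the mask is expected to hold 0 for dropped elements and a
+       * 1 / (1 - rate) scale for kept ones (inverted dropout), which is why
+       * inference can simply forward the input unchanged */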
mask_ = input_.dropout_mask(rate_);
- input_.multiply_i(mask_);
-
- /** @todo: remove below once in_place support is ready from manager */
+ input_.multiply(mask_, output_);
+ } else {
output_.fill(input_);
}
  }
}
void DropOutLayer::calcDerivative(RunLayerContext &context) {
// Assume it is in-place calculation
auto &rate_ = std::get<props::DropOutRate>(dropout_rate).get();
- if (rate_ > epsilon) {
- for (unsigned int i = 0; i < context.getNumInputs(); ++i) {
- Tensor &derivative_ = context.getIncomingDerivative(i);
- Tensor &ret_ = context.getOutgoingDerivative(SINGLE_INOUT_IDX);
- Tensor &mask_ = context.getTensor(mask_idx[i]);
- derivative_.multiply_i(mask_);
+ for (unsigned int i = 0; i < context.getNumInputs(); ++i) {
+ Tensor &derivative_ = context.getIncomingDerivative(i);
+    Tensor &ret_ = context.getOutgoingDerivative(i);
- /** @todo: remove below once in_place support is ready from manager */
+ /** @todo make this in-place */
+ if (rate_ > epsilon) {
+ Tensor &mask_ = context.getTensor(mask_idx[i]);
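+      /* reuse the mask saved during forwarding so the gradient is zeroed and
+       * scaled at exactly the elements that were dropped */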
+ derivative_.multiply(mask_, ret_);
+ } else {
ret_.fill(derivative_);
}
}
return_state=False)
record_single(gru, (3, 4, 7), "gru_multi_step_seq_act", input_type='float')
-inspect_file("gru_single_step_seq.nnlayergolden")
+ dropout = K.layers.Dropout(rate=0.2)
+ record_single(dropout, (2, 3, 2, 3), "dropout_20_training", {"training": True})
+ record_single(dropout, (2, 3, 2, 3), "dropout_20_inference", {"training": False})
+
+ dropout = K.layers.Dropout(rate=0.0)
+ record_single(dropout, (2, 3, 2, 3), "dropout_0_training", {"training": True})
+
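+    # rate=0.9999 stands in for a full drop rate; the matching C++ test
+    # passes dropout_rate=1.0 and reads dropout_100_training.nnlayergolden.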
+ dropout = K.layers.Dropout(rate=0.9999)
+ record_single(dropout, (2, 3, 2, 3), "dropout_100_training", {"training": True})
+
+inspect_file("dropout_20_training.nnlayergolden")
FORWARD_MODE_INFERENCE =
1 << 2, /**< set if layer should be forwarded with inference mode */
+  DROPOUT_MATCH_60_PERCENT = 1 << 3, /**< set if a 60 percent output match is
+                                        sufficient for dropout */
DEFAULT =
0, /**< default set up, compare forward, backward in training mode */
} LayerGoldenTestParamOptions;
bool shouldForwardWithInferenceMode();
/**
+   * @brief check if the given test suite should compare results with a
+   * percentage match for the tensors
+   *
+   * @return bool true if the tensors should only match approximately
+ */
+ bool shouldMatchDropout60Percent();
+
+ /**
* @brief check if given test suite should skip calculating derivative
*
* @return bool true if should skip calculating derivative
}
static void compareRunContext(RunLayerContext &rc, std::ifstream &file,
- bool skip_grad, bool skip_deriv) {
+ bool skip_grad, bool skip_deriv,
+ bool dropout_match) {
file.seekg(0, std::ios::beg);
- auto compare_tensors = [&file](unsigned length, auto tensor_getter, auto pred,
- bool skip_compare, const std::string &name) {
+ auto compare_percentage_tensors = [](const Tensor &t1, const Tensor &t2,
+ unsigned int match_percentage) -> bool {
+ if (match_percentage == 100)
+ return t1 == t2;
+
+ if (t1.getDim() != t2.getDim())
+ return false;
+
+ unsigned int total = t1.size();
+ unsigned int weak_match = 0;
+ unsigned int strong_match = 0;
+
+ for (unsigned int idx = 0; idx < total; idx++) {
+ auto d1 = t1.getValue(idx);
+ auto d2 = t2.getValue(idx);
+      /** either both values must be equal, or exactly one must be zero */
+ weak_match +=
+ std::min((d1 == d2) + (d1 == 0 && d2 != 0) + (d1 != 0 && d2 == 0), 1);
+ strong_match += (d1 == d2);
+ }
+
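+    /** pass if every element either matches exactly or differs only because
+     *  one side was zeroed (a different dropout mask), and at least
+     *  match_percentage percent of the elements match exactly */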
+    return (weak_match == total) &&
+           (strong_match >= (total * match_percentage) / 100);
+ };
+
+ auto compare_tensors = [&file, compare_percentage_tensors](
+ unsigned length, auto tensor_getter, auto pred,
+ bool skip_compare, const std::string &name,
+ unsigned int match_percentage = 100) {
for (unsigned i = 0; i < length; ++i) {
if (!pred(i)) {
        continue;
      }

      const auto &tensor = tensor_getter(i);
      auto answer = tensor.clone();
      sizeCheckedReadTensor(answer, file, name + " at " + std::to_string(i));

      if (skip_compare) {
continue;
}
- EXPECT_EQ(tensor, answer) << name << " at " << std::to_string(i);
+ EXPECT_TRUE(compare_percentage_tensors(tensor, answer, match_percentage))
+ << name << " at " << std::to_string(i);
}
};
  auto always_read = [](unsigned) { return true; };
  auto only_read_trainable = [&rc](unsigned idx) {
    return rc.weightHasGradient(idx);
  };
+ int match_percentage = 100;
+ if (dropout_match)
+ match_percentage = 60;
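+  /* with independent masks at drop rate 0.2, roughly 0.8^2 + 0.2^2 = 68% of
+   * the positions are expected to agree exactly, so 60 is a safe bound */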
+
constexpr bool skip_compare = true;
  compare_tensors(rc.getNumWeights(),
                  [&rc](unsigned idx) { return rc.getWeight(idx); },
                  always_read, skip_compare, "initial_weights");
  compare_tensors(rc.getNumInputs(),
                  [&rc](unsigned idx) { return rc.getInput(idx); },
                  always_read, !skip_compare, "inputs");
compare_tensors(rc.getNumOutputs(),
[&rc](unsigned idx) { return rc.getOutput(idx); },
- always_read, !skip_compare, "outputs");
+ always_read, !skip_compare, "outputs", match_percentage);
compare_tensors(rc.getNumWeights(),
[&rc](unsigned idx) { return rc.getWeightGrad(idx); },
only_read_trainable, skip_grad, "gradients");
  compare_tensors(rc.getNumWeights(),
                  [&rc](unsigned idx) { return rc.getWeight(idx); },
                  always_read, !skip_compare, "weights");
compare_tensors(rc.getNumInputs(),
[&rc](unsigned idx) { return rc.getOutgoingDerivative(idx); },
- always_read, skip_deriv, "derivatives");
+ always_read, skip_deriv, "derivatives", match_percentage);
}
LayerGoldenTest::~LayerGoldenTest() {}
void LayerGoldenTest::TearDown() {}
+bool LayerGoldenTest::shouldMatchDropout60Percent() {
+ return std::get<int>(GetParam()) &
+ LayerGoldenTestParamOptions::DROPOUT_MATCH_60_PERCENT;
+}
+
bool LayerGoldenTest::shouldForwardWithInferenceMode() {
return std::get<int>(GetParam()) &
LayerGoldenTestParamOptions::FORWARD_MODE_INFERENCE;
bool skip_calc_grad = shouldSkipCalcGrad();
bool skip_calc_deriv = shouldSkipCalcDeriv();
+ bool dropout_compare_60_percent = shouldMatchDropout60Percent();
for (int i = 0; i < 4; ++i) {
/// warm layer multiple times
layer->calcDerivative(rc);
}
- compareRunContext(rc, golden_file, skip_calc_grad, skip_calc_deriv);
+ compareRunContext(rc, golden_file, skip_calc_grad, skip_calc_deriv,
+ dropout_compare_60_percent);
EXPECT_TRUE(true); // stub test for tcm
}
'unittest_layers_concat.cpp',
'unittest_layers_permute.cpp',
'unittest_layers_attention.cpp',
+ 'unittest_layers_dropout.cpp',
]
if get_option('enable-tflite-backbone')
--- /dev/null
+// SPDX-License-Identifier: Apache-2.0
+/**
+ * Copyright (C) 2021 Parichay Kapoor <pk.kapoor@samsung.com>
+ *
+ * @file unittest_layers_dropout.cpp
+ * @date 15 October 2021
+ * @brief Dropout Layer Test
+ * @see https://github.com/nnstreamer/nntrainer
+ * @author Parichay Kapoor <pk.kapoor@samsung.com>
+ * @bug No known bugs except for NYI items
+ */
+#include <tuple>
+
+#include <gtest/gtest.h>
+
+#include <dropout.h>
+#include <layers_common_tests.h>
+
+auto semantic_dropout =
+ LayerSemanticsParamType(nntrainer::createLayer<nntrainer::DropOutLayer>,
+ nntrainer::DropOutLayer::type, {}, 0, false, 1);
+
+INSTANTIATE_TEST_CASE_P(Dropout, LayerSemantics,
+ ::testing::Values(semantic_dropout));
+
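+/**
+ * In inference mode dropout is an identity map, so the golden output is
+ * deterministic and gradient/derivative comparisons can be skipped.
+ */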
+auto dropout_inference_option =
+ LayerGoldenTestParamOptions::SKIP_CALC_GRAD |
+ LayerGoldenTestParamOptions::SKIP_CALC_DERIV |
+ LayerGoldenTestParamOptions::FORWARD_MODE_INFERENCE;
+
+auto dropout_20_training = LayerGoldenTestParamType(
+ nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=0.2"},
+ "2:3:2:3", "dropout_20_training.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT |
+ LayerGoldenTestParamOptions::DROPOUT_MATCH_60_PERCENT);
+
+auto dropout_20_inference = LayerGoldenTestParamType(
+ nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=0.2"},
+ "2:3:2:3", "dropout_20_inference.nnlayergolden", dropout_inference_option);
+
+auto dropout_0_training = LayerGoldenTestParamType(
+ nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=0.0"},
+ "2:3:2:3", "dropout_0_training.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT);
+
+auto dropout_100_training = LayerGoldenTestParamType(
+ nntrainer::createLayer<nntrainer::DropOutLayer>, {"dropout_rate=1.0"},
+ "2:3:2:3", "dropout_100_training.nnlayergolden",
+ LayerGoldenTestParamOptions::DEFAULT);
+
+INSTANTIATE_TEST_CASE_P(Dropout, LayerGoldenTest,
+ ::testing::Values(dropout_20_training,
+ dropout_0_training,
+ dropout_100_training,
+ dropout_20_inference));