// SPDX-License-Identifier: Apache-2.0
/**
 * Copyright (C) 2021 Jihoon Lee <jhoon.it.lee@samsung.com>
 *
 * @file layer_golden_tests.cpp
 * @date 2021
 * @brief Common golden test for nntrainer layers (Param Tests)
 * @see https://github.com/nnstreamer/nntrainer
 * @author Jihoon Lee <jhoon.it.lee@samsung.com>
 * @bug No known bugs except for NYI items
 */
#include <layers_common_tests.h>

#include <fstream>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>

#include <base_properties.h>
#include <layer_context.h>
#include <layer_devel.h>
#include <nntrainer_test_util.h>
#include <util_func.h>
25 #pragma GCC diagnostic ignored "-Wunused-local-typedefs"
27 using namespace nntrainer;
/**
 * @brief Bundle of every tensor group a layer run needs, in the fixed order
 * (weights, inputs, outputs, extra tensors). Unpacked with structured
 * bindings when building a RunLayerContext.
 */
using TensorPacks = std::tuple<
  std::vector<Weight> /**< weight */, std::vector<Var_Grad> /**< in */,
  std::vector<Var_Grad> /**< out */, std::vector<Var_Grad> /**< tensors */>;
34 * @brief read Tensor with size check
36 * @param t tensor to read
37 * @param file file stream
39 static void sizeCheckedReadTensor(Tensor &t, std::ifstream &file,
40 const std::string &error_msg = "") {
42 checkedRead(file, (char *)&sz, sizeof(unsigned));
43 NNTR_THROW_IF(t.getDim().getDataLen() != sz, std::invalid_argument)
44 << "[ReadFail] dimension does not match at " << error_msg << " sz: " << sz
45 << " dimsize: " << t.getDim().getDataLen() << '\n';
50 * @brief Get the layer Path object
52 * @param file_name file name
53 * @return const std::string model path
55 static const std::string getGoldenPath(const std::string &file_name) {
56 return getResPath(file_name, {"test", "unittest_layers"});
59 static InitLayerContext createInitContext(Layer *layer,
60 const std::string &input_shape_str) {
61 struct shape_parser_ : Property<TensorDim> {
62 using prop_tag = dimension_prop_tag;
65 std::vector<shape_parser_> parsed;
66 from_string(input_shape_str, parsed);
68 InitLayerContext context({parsed.begin(), parsed.end()}, 1, false,
70 layer->finalize(context);
75 static TensorPacks prepareTensors(const InitLayerContext &context,
76 std::ifstream &file) {
77 auto allocate_inouts = [&file](const auto &dims) {
78 std::vector<Var_Grad> vg;
79 vg.reserve(dims.size());
81 for (auto &dim : dims) {
82 vg.emplace_back(dim, Tensor::Initializer::NONE, true, true);
83 sizeCheckedReadTensor(vg.back().getVariableRef(), file,
89 auto allocate_tensors = [](const auto &specs) {
90 std::vector<Var_Grad> vg;
91 vg.reserve(specs.size());
93 for (auto &spec : specs) {
94 vg.emplace_back(spec, true);
99 auto allocate_weights = [&file](const auto &specs) {
100 std::vector<Weight> weights;
101 weights.reserve(specs.size());
103 for (auto &spec : specs) {
104 weights.emplace_back(spec, true);
105 sizeCheckedReadTensor(weights.back().getVariableRef(), file,
106 weights.back().getName());
107 weights.back().getGradientRef().setZero();
113 allocate_weights(context.getWeightsSpec()),
114 allocate_inouts(context.getInputDimensions()),
115 allocate_inouts(context.getOutputDimensions()),
116 allocate_tensors(context.getTensorsSpec()),
120 static RunLayerContext prepareRunContext(const TensorPacks &packs) {
121 auto &[weights, ins, outs, tensors] = packs;
122 auto create_view = [](const auto &var_grads) {
123 using ptr_type_ = std::add_pointer_t<
124 typename std::decay_t<decltype(var_grads)>::value_type>;
125 std::vector<std::remove_cv_t<ptr_type_>> ret;
126 ret.reserve(var_grads.size());
128 for (auto &vg : var_grads) {
129 ret.push_back(const_cast<ptr_type_>(&vg));
136 RunLayerContext("golden", true, 0.0f, false, create_view(weights),
137 create_view(ins), create_view(outs), create_view(tensors));
139 auto num_outputs = rc.getNumOutputs();
141 for (unsigned i = 0; i < num_outputs; ++i) {
142 rc.getOutput(i).setRandUniform(); /// randomize output
143 rc.getIncomingDerivative(i).setValue(
144 2.0); /// incoming derivative is fixed to 2
150 static void compareRunContext(RunLayerContext &rc, std::ifstream &file,
151 bool skip_grad, bool skip_deriv,
152 bool dropout_match) {
153 file.seekg(0, std::ios::beg);
154 auto compare_percentage_tensors = [](const Tensor &t1, const Tensor &t2,
155 unsigned int match_percentage) -> bool {
156 if (match_percentage == 100) {
161 if (t1.getDim() != t2.getDim())
164 unsigned int total = t1.size();
165 unsigned int weak_match = 0;
166 unsigned int strong_match = 0;
168 for (unsigned int idx = 0; idx < total; idx++) {
169 auto d1 = t1.getValue(idx);
170 auto d2 = t2.getValue(idx);
171 /** either both the values must be equal or 1 must be zero */
173 std::min((d1 == d2) + (d1 == 0 && d2 != 0) + (d1 != 0 && d2 == 0), 1);
174 strong_match += (d1 == d2);
177 return (weak_match == total) &
178 (strong_match >= (total * match_percentage) / 100);
181 auto compare_tensors = [&file, compare_percentage_tensors](
182 unsigned length, auto tensor_getter, auto pred,
183 bool skip_compare, const std::string &name,
184 unsigned int match_percentage = 100) {
185 for (unsigned i = 0; i < length; ++i) {
189 const auto &tensor = tensor_getter(i);
190 auto answer = tensor.clone();
191 sizeCheckedReadTensor(answer, file, name + " at " + std::to_string(i));
196 EXPECT_TRUE(compare_percentage_tensors(tensor, answer, match_percentage))
197 << name << " at " << std::to_string(i);
201 auto always_read = [](unsigned idx) { return true; };
202 auto only_read_trainable = [&rc](unsigned idx) {
203 return rc.weightHasGradient(idx);
206 int match_percentage = 100;
208 match_percentage = 60;
210 constexpr bool skip_compare = true;
212 compare_tensors(rc.getNumWeights(),
213 [&rc](unsigned idx) { return rc.getWeight(idx); },
214 always_read, skip_compare, "initial_weights");
215 compare_tensors(rc.getNumInputs(),
216 [&rc](unsigned idx) { return rc.getInput(idx); }, always_read,
217 !skip_compare, "inputs");
218 compare_tensors(rc.getNumOutputs(),
219 [&rc](unsigned idx) { return rc.getOutput(idx); },
220 always_read, !skip_compare, "outputs", match_percentage);
221 compare_tensors(rc.getNumWeights(),
222 [&rc](unsigned idx) { return rc.getWeightGrad(idx); },
223 only_read_trainable, skip_grad, "gradients");
224 compare_tensors(rc.getNumWeights(),
225 [&rc](unsigned idx) { return rc.getWeight(idx); },
226 always_read, !skip_compare, "weights");
227 compare_tensors(rc.getNumInputs(),
228 [&rc](unsigned idx) { return rc.getOutgoingDerivative(idx); },
229 always_read, skip_deriv, "derivatives", match_percentage);
/// Out-of-line destructor; nothing to release beyond the base fixture.
LayerGoldenTest::~LayerGoldenTest() {}
/// No per-test setup needed; all state is built inside the test body.
void LayerGoldenTest::SetUp() {}
/// No per-test teardown needed; RAII members clean up automatically.
void LayerGoldenTest::TearDown() {}
238 bool LayerGoldenTest::shouldMatchDropout60Percent() {
239 return std::get<int>(GetParam()) &
240 LayerGoldenTestParamOptions::DROPOUT_MATCH_60_PERCENT;
243 bool LayerGoldenTest::shouldForwardWithInferenceMode() {
244 return std::get<int>(GetParam()) &
245 LayerGoldenTestParamOptions::FORWARD_MODE_INFERENCE;
248 bool LayerGoldenTest::shouldSkipCalcDeriv() {
249 return std::get<int>(GetParam()) &
250 LayerGoldenTestParamOptions::SKIP_CALC_DERIV;
253 bool LayerGoldenTest::shouldSkipCalcGrad() {
254 return std::get<int>(GetParam()) &
255 LayerGoldenTestParamOptions::SKIP_CALC_GRAD;
258 TEST_P(LayerGoldenTest, run) {
259 auto f = std::get<0>(GetParam());
260 auto layer = f(std::get<1>(GetParam()));
261 auto golden_file = checkedOpenStream<std::ifstream>(
262 getGoldenPath(std::get<3>(GetParam())), std::ios::in | std::ios::binary);
263 auto &input_dims = std::get<2>(GetParam());
265 auto ic = createInitContext(layer.get(), input_dims);
266 auto tensors = prepareTensors(ic, golden_file);
267 auto rc = prepareRunContext(tensors);
269 bool skip_calc_grad = shouldSkipCalcGrad();
270 bool skip_calc_deriv = shouldSkipCalcDeriv();
271 bool dropout_compare_60_percent = shouldMatchDropout60Percent();
273 for (int i = 0; i < 4; ++i) {
274 /// warm layer multiple times
275 layer->forwarding(rc, !shouldForwardWithInferenceMode());
278 layer->forwarding(rc, !shouldForwardWithInferenceMode());
279 if (!skip_calc_grad) {
280 layer->calcGradient(rc);
282 if (!skip_calc_deriv) {
283 layer->calcDerivative(rc);
286 compareRunContext(rc, golden_file, skip_calc_grad, skip_calc_deriv,
287 dropout_compare_60_percent);
289 EXPECT_TRUE(true); // stub test for tcm