/*
 * Copyright (C) 2018-2019 Intel Corporation.
 *
 * The source code contained or described herein and all documents
 * related to the source code ("Material") are owned by Intel Corporation
 * or its suppliers or licensors. Title to the Material remains with
 * Intel Corporation or its suppliers and licensors. The Material may
 * contain trade secrets and proprietary and confidential information
 * of Intel Corporation and its suppliers and licensors, and is protected
 * by worldwide copyright and trade secret laws and treaty provisions.
 * No part of the Material may be used, copied, reproduced, modified,
 * published, uploaded, posted, transmitted, distributed, or disclosed
 * in any way without Intel's prior express written permission.
 *
 * No license under any patent, copyright, trade secret or other
 * intellectual property right is granted to or conferred upon you by
 * disclosure or delivery of the Materials, either expressly, by implication,
 * inducement, estoppel or otherwise. Any license under such intellectual
 * property rights must be express and approved by Intel in writing.
 *
 * Include any supplier copyright notices as supplier requires Intel to use.
 *
 * Include supplier trademarks or logos as supplier requires Intel to use,
 * preceded by an asterisk. An asterisked footnote can be added as follows:
 * *Third Party trademarks are the property of their respective owners.
 *
 * Unless otherwise agreed by Intel in writing, you may not remove or alter
 * this notice or any other notice embedded in Materials by Intel or Intel's
 * suppliers or licensors in any way.
 */
#include <cstdio>

#include <list>
#include <string>

#include <gtest/gtest.h>
#include <inference_engine/layer_transform.hpp>
#include <gna_plugin/quantization/model_quantizer.hpp>
#include "gna_plugin/quantization/layer_quantizer.hpp"
#include "gna_matcher.hpp"
41 using namespace InferenceEngine;
42 using namespace GNAPluginNS;
43 using namespace GNATestIRs;
45 class GNAAOTTests : public GNATest {
47 std::list<std::string> files_to_remove;
48 std::string registerFileForRemove(std::string file_to_remove) {
49 files_to_remove.push_back(file_to_remove);
50 return file_to_remove;
52 void TearDown() override {
53 for (auto & file : files_to_remove) {
54 std::remove(file.c_str());
58 void SetUp() override {
62 TEST_F(GNAAOTTests, AffineWith2AffineOutputs_canbe_export_imported) {
64 const std::string X = registerFileForRemove("unit_tests.bin");
66 // running export to a file
67 export_network(AffineWith2AffineOutputsModel())
68 .inNotCompactMode().as().gna().model().to(X);
70 // running infer using imported model instead of IR
71 assert_that().onInferModel().importedFrom(X)
72 .inNotCompactMode().gna().propagate_forward().called().once();
76 TEST_F(GNAAOTTests, AffineWith2AffineOutputs_canbe_imported_verify_structure) {
78 auto & nnet_type = storage<intel_nnet_type_t>();
80 // saving pointer to nnet - todo probably deep copy required
81 save_args().onInferModel(AffineWith2AffineOutputsModel())
82 .inNotCompactMode().from().gna().propagate_forward().to(&nnet_type);
84 const std::string X = registerFileForRemove("unit_tests.bin");
86 // running export to a file
87 export_network(AffineWith2AffineOutputsModel())
88 .inNotCompactMode().as().gna().model().to(X);
90 // running infer using imported model instead of IR
91 assert_that().onInferModel().importedFrom(X)
92 .inNotCompactMode().gna().propagate_forward().called_with().exact_nnet_structure(&nnet_type);
96 TEST_F(GNAAOTTests, CanConvertFromAOTtoSueModel) {
98 auto & nnet_type = storage<intel_nnet_type_t>();
100 // saving pointer to nnet - todo probably deep copy required
101 save_args().onInferModel(AffineWith2AffineOutputsModel())
102 .inNotCompactMode().from().gna().propagate_forward().to(&nnet_type);
104 const std::string X = registerFileForRemove("unit_tests.bin");
106 // running export to a file
107 export_network(AffineWith2AffineOutputsModel())
108 .inNotCompactMode().as().gna().model().to(X);
110 // running infer using imported model instead of IR
111 assert_that().onInferModel().importedFrom(X)
112 .inNotCompactMode().withGNAConfig(GNA_CONFIG_KEY(FIRMWARE_MODEL_IMAGE), "sue.dump").gna().dumpXNN().called();