1 // Copyright (C) 2018-2019 Intel Corporation
2 // SPDX-License-Identifier: Apache-2.0
5 #include <gtest/gtest.h>
6 #include <inference_engine/parsers.h>
7 #include <inference_engine/ie_cnn_net_reader_impl.h>
8 #include <test_model_path.hpp>
9 #include <mock_icnn_network.hpp>
10 #include <gmock/gmock-more-actions.h>
11 #include "cnn_network_impl.hpp"
12 #include "mock_iformat_parser.hpp"
13 #include <test_assertions.hpp>
14 #include <single_layer_common.hpp>
16 using namespace testing;
17 using namespace InferenceEngine;
18 using namespace InferenceEngine::details;
// Test fixture for CNNNetReaderImpl. Provides MockFormatParserCreator so a
// test can substitute a GMock parser for the real IR format parser and
// intercept/verify the Parse() call the reader makes internally.
// NOTE(review): source is sampled — method bodies between the visible lines
// (create(), getParser(), closing braces) are not shown here.
21 class CNNNetReaderImplTest : public ::testing::Test {
27 struct MockFormatParserCreator : public FormatParserCreator {
// Constructor eagerly creates the mock parser so expectations can be set
// on it before the reader asks the creator for a parser.
28 MockFormatParserCreator() {
29 _parser = make_shared<MockIFormatParser>();
// FormatParserCreator interface: returns the pre-built mock regardless of
// the requested IR version (body not visible in this sampled view).
32 std::shared_ptr<IFormatParser> create(int version) override {
// Accessor used by tests to attach EXPECT_CALLs to the mock parser.
36 MockIFormatParser* getParser() {
// Shared so both the reader (via create()) and the test observe one mock.
41 std::shared_ptr<MockIFormatParser> _parser;
// Verifies that ReadNetwork() calls validate() (and getName()) exactly once
// on the network object produced by the parser. The parser itself is mocked,
// so the XML below only has to be well-formed enough to reach Parse().
// NOTE(review): source is sampled — intervening model/XML lines and the
// test's closing lines are not visible here.
44 TEST_F(CNNNetReaderImplTest, validateIsCalled) {
46 "<net name=\"PVANET\" version=\"2\" batch=\"1\">"
48 "        <layer name=\"data\" type=\"Input\" precision=\"FP32\" id=\"0\">"
58 "        <layer name=\"conv1_1_conv\" type=\"Convolution\" precision=\"FP32\" id=\"2\">"
59 "            <convolution_data stride-x=\"2\" stride-y=\"2\" pad-x=\"3\" pad-y=\"3\" kernel-x=\"7\" kernel-y=\"7\" output=\"16\" group=\"1\"/>"
76 "                <weights offset=\"0\" size=\"9408\"/>"
77 "                <biases offset=\"9408\" size=\"64\"/>"
79 "        <layer name=\"conv1_1_neg\" type=\"Power\" precision=\"FP32\" id=\"3\">"
80 "            <power_data power=\"1\" scale=\"-1\" shift=\"0\"/>"
98 "        <layer name=\"conv1_1_concat\" type=\"Concat\" precision=\"FP32\" id=\"4\">"
99 "            <concat_data axis=\"1\"/>"
123 "        <layer name=\"conv1_1_scale\" type=\"ScaleShift\" precision=\"FP32\" id=\"5\">"
140 "                <weights offset=\"9472\" size=\"128\"/>"
141 "                <biases offset=\"9600\" size=\"128\"/>"
143 "        <layer name=\"conv1_1_relu\" type=\"ReLU\" precision=\"FP32\" id=\"6\">"
144 "            <data negative_slope=\"0\" engine=\"caffe.ReLUParameter.DEFAULT\"/>"
162 "        <layer name=\"pool1\" type=\"Pooling\" precision=\"FP32\" id=\"7\">"
163 "            <pooling_data kernel-x=\"3\" kernel-y=\"3\" pad-x=\"0\" pad-y=\"0\" stride-x=\"2\" stride-y=\"2\" rounding-type=\"ceil\" pool-method=\"max\"/>"
// Edges wire the layers into a simple DAG: data -> conv -> {neg, concat} -> scale -> relu -> pool.
183 "        <edge from-layer=\"0\" from-port=\"0\" to-layer=\"2\" to-port=\"2\"/>"
184 "        <edge from-layer=\"2\" from-port=\"3\" to-layer=\"3\" to-port=\"4\"/>"
185 "        <edge from-layer=\"2\" from-port=\"3\" to-layer=\"4\" to-port=\"6\"/>"
186 "        <edge from-layer=\"3\" from-port=\"5\" to-layer=\"4\" to-port=\"7\"/>"
187 "        <edge from-layer=\"4\" from-port=\"8\" to-layer=\"5\" to-port=\"9\"/>"
188 "        <edge from-layer=\"5\" from-port=\"10\" to-layer=\"6\" to-port=\"11\"/>"
189 "        <edge from-layer=\"6\" from-port=\"12\" to-layer=\"7\" to-port=\"13\"/>"
// Inject the mocked parser into the reader through the creator seam.
192 auto parserCreator = make_shared<MockFormatParserCreator>();
193 CNNNetReaderImpl reader(parserCreator);
194 auto network = make_shared<MockCNNNetworkImpl>();
195 auto name = std::string{"AlexNet"};
// Expectations: the reader must Parse() once, then validate() and getName()
// once each on the returned network. ReturnRef is required because
// getName() returns a reference.
197 EXPECT_CALL(*parserCreator->getParser(), Parse(_)).Times(1).WillOnce(Return(network));
198 EXPECT_CALL(*network.get(), validate(_)).Times(1);
199 EXPECT_CALL(*network.get(), getName()).Times(1).WillOnce(ReturnRef(name));
201 ASSERT_NO_THROW(sts = reader.ReadNetwork(model.data(), model.length(), &resp));
// Verifies that the real V2 parser rejects a graph containing a cycle.
// The model is a SqueezeNet-style IR; it is valid except for one extra edge
// (layer 11 -> layer 8, see the comment below) that closes a loop, so
// ReadNetwork() must return GENERAL_ERROR instead of OK.
// NOTE(review): source is sampled — port/output blocks of the XML and the
// test's closing lines are not visible here.
205 TEST_F(CNNNetReaderImplTest, cycleIsDetectedInReader) {
207 "<net batch=\"1\" name=\"model\" version=\"2\">"
209 "        <layer id=\"0\" name=\"data\" precision=\"FP32\" type=\"Input\">"
219 "        <layer id=\"1\" name=\"conv1\" precision=\"FP32\" type=\"Convolution\">"
220 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"64\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,2,2\" stride-x=\"2\" stride-y=\"2\"/>"
238 "                    <weights offset=\"0\" size=\"6912\"/>"
239 "                    <biases offset=\"6912\" size=\"256\"/>"
242 "        <layer id=\"2\" name=\"relu_conv1\" precision=\"FP32\" type=\"ReLU\">"
243 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
261 "        <layer id=\"3\" name=\"pool1\" precision=\"FP32\" type=\"Pooling\">"
262 "            <data exclude-pad=\"false\" kernel-x=\"3\" kernel-y=\"3\" pad-x=\"0\" pad-y=\"0\" pool-method=\"max\" rounding_type=\"ceil\" stride=\"1,1,2,2\" stride-x=\"2\" stride-y=\"2\"/>"
280 "        <layer id=\"4\" name=\"fire2/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
281 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"16\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
299 "                    <weights offset=\"7168\" size=\"4096\"/>"
300 "                    <biases offset=\"11264\" size=\"64\"/>"
303 "        <layer id=\"5\" name=\"fire2/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
304 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
322 "        <layer id=\"6\" name=\"fire2/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
323 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"64\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
341 "                    <weights offset=\"11328\" size=\"4096\"/>"
342 "                    <biases offset=\"15424\" size=\"256\"/>"
345 "        <layer id=\"7\" name=\"fire2/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
346 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
364 "        <layer id=\"8\" name=\"fire2/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
365 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"64\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
389 "                    <weights offset=\"15680\" size=\"36864\"/>"
390 "                    <biases offset=\"52544\" size=\"256\"/>"
393 "        <layer id=\"9\" name=\"fire2/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
394 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
412 "        <layer id=\"10\" name=\"fire2/concat\" precision=\"FP32\" type=\"Concat\">"
413 "            <data axis=\"1\"/>"
437 "        <layer id=\"11\" name=\"fire3/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
438 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"16\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
462 "                    <weights offset=\"52800\" size=\"8192\"/>"
463 "                    <biases offset=\"60992\" size=\"64\"/>"
466 "        <layer id=\"12\" name=\"fire3/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
467 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
485 "        <layer id=\"13\" name=\"fire3/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
486 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"64\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
504 "                    <weights offset=\"61056\" size=\"4096\"/>"
505 "                    <biases offset=\"65152\" size=\"256\"/>"
508 "        <layer id=\"14\" name=\"fire3/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
509 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
527 "        <layer id=\"15\" name=\"fire3/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
528 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"64\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
546 "                    <weights offset=\"65408\" size=\"36864\"/>"
547 "                    <biases offset=\"102272\" size=\"256\"/>"
550 "        <layer id=\"16\" name=\"fire3/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
551 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
569 "        <layer id=\"17\" name=\"fire3/concat\" precision=\"FP32\" type=\"Concat\">"
570 "            <data axis=\"1\"/>"
594 "        <layer id=\"18\" name=\"pool3\" precision=\"FP32\" type=\"Pooling\">"
595 "            <data exclude-pad=\"false\" kernel-x=\"3\" kernel-y=\"3\" pad-x=\"0\" pad-y=\"0\" pool-method=\"max\" rounding_type=\"ceil\" stride=\"1,1,2,2\" stride-x=\"2\" stride-y=\"2\"/>"
613 "        <layer id=\"19\" name=\"fire4/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
614 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"32\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
632 "                    <weights offset=\"102528\" size=\"16384\"/>"
633 "                    <biases offset=\"118912\" size=\"128\"/>"
636 "        <layer id=\"20\" name=\"fire4/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
637 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
655 "        <layer id=\"21\" name=\"fire4/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
656 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"128\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
674 "                    <weights offset=\"119040\" size=\"16384\"/>"
675 "                    <biases offset=\"135424\" size=\"512\"/>"
678 "        <layer id=\"22\" name=\"fire4/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
679 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
697 "        <layer id=\"23\" name=\"fire4/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
698 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"128\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
716 "                    <weights offset=\"135936\" size=\"147456\"/>"
717 "                    <biases offset=\"283392\" size=\"512\"/>"
720 "        <layer id=\"24\" name=\"fire4/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
721 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
739 "        <layer id=\"25\" name=\"fire4/concat\" precision=\"FP32\" type=\"Concat\">"
740 "            <data axis=\"1\"/>"
764 "        <layer id=\"26\" name=\"fire5/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
765 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"32\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
783 "                    <weights offset=\"283904\" size=\"32768\"/>"
784 "                    <biases offset=\"316672\" size=\"128\"/>"
787 "        <layer id=\"27\" name=\"fire5/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
788 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
806 "        <layer id=\"28\" name=\"fire5/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
807 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"128\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
825 "                    <weights offset=\"316800\" size=\"16384\"/>"
826 "                    <biases offset=\"333184\" size=\"512\"/>"
829 "        <layer id=\"29\" name=\"fire5/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
830 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
848 "        <layer id=\"30\" name=\"fire5/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
849 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"128\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
867 "                    <weights offset=\"333696\" size=\"147456\"/>"
868 "                    <biases offset=\"481152\" size=\"512\"/>"
871 "        <layer id=\"31\" name=\"fire5/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
872 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
890 "        <layer id=\"32\" name=\"fire5/concat\" precision=\"FP32\" type=\"Concat\">"
891 "            <data axis=\"1\"/>"
915 "        <layer id=\"33\" name=\"pool5\" precision=\"FP32\" type=\"Pooling\">"
916 "            <data exclude-pad=\"false\" kernel-x=\"3\" kernel-y=\"3\" pad-x=\"0\" pad-y=\"0\" pool-method=\"max\" rounding_type=\"ceil\" stride=\"1,1,2,2\" stride-x=\"2\" stride-y=\"2\"/>"
934 "        <layer id=\"34\" name=\"fire6/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
935 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"48\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
953 "                    <weights offset=\"481664\" size=\"49152\"/>"
954 "                    <biases offset=\"530816\" size=\"192\"/>"
957 "        <layer id=\"35\" name=\"fire6/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
958 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
976 "        <layer id=\"36\" name=\"fire6/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
977 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"192\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
995 "                    <weights offset=\"531008\" size=\"36864\"/>"
996 "                    <biases offset=\"567872\" size=\"768\"/>"
999 "        <layer id=\"37\" name=\"fire6/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
1000 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1018 "        <layer id=\"38\" name=\"fire6/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
1019 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"192\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1037 "                    <weights offset=\"568640\" size=\"331776\"/>"
1038 "                    <biases offset=\"900416\" size=\"768\"/>"
1041 "        <layer id=\"39\" name=\"fire6/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
1042 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1060 "        <layer id=\"40\" name=\"fire6/concat\" precision=\"FP32\" type=\"Concat\">"
1061 "            <data axis=\"1\"/>"
1085 "        <layer id=\"41\" name=\"fire7/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
1086 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"48\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1104 "                    <weights offset=\"901184\" size=\"73728\"/>"
1105 "                    <biases offset=\"974912\" size=\"192\"/>"
1108 "        <layer id=\"42\" name=\"fire7/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
1109 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1127 "        <layer id=\"43\" name=\"fire7/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
1128 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"192\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1146 "                    <weights offset=\"975104\" size=\"36864\"/>"
1147 "                    <biases offset=\"1011968\" size=\"768\"/>"
1150 "        <layer id=\"44\" name=\"fire7/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
1151 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1169 "        <layer id=\"45\" name=\"fire7/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
1170 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"192\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1188 "                    <weights offset=\"1012736\" size=\"331776\"/>"
1189 "                    <biases offset=\"1344512\" size=\"768\"/>"
1192 "        <layer id=\"46\" name=\"fire7/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
1193 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1211 "        <layer id=\"47\" name=\"fire7/concat\" precision=\"FP32\" type=\"Concat\">"
1212 "            <data axis=\"1\"/>"
1236 "        <layer id=\"48\" name=\"fire8/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
1237 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"64\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1255 "                    <weights offset=\"1345280\" size=\"98304\"/>"
1256 "                    <biases offset=\"1443584\" size=\"256\"/>"
1259 "        <layer id=\"49\" name=\"fire8/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
1260 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1278 "        <layer id=\"50\" name=\"fire8/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
1279 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"256\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1297 "                    <weights offset=\"1443840\" size=\"65536\"/>"
1298 "                    <biases offset=\"1509376\" size=\"1024\"/>"
1301 "        <layer id=\"51\" name=\"fire8/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
1302 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1320 "        <layer id=\"52\" name=\"fire8/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
1321 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"256\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1339 "                    <weights offset=\"1510400\" size=\"589824\"/>"
1340 "                    <biases offset=\"2100224\" size=\"1024\"/>"
1343 "        <layer id=\"53\" name=\"fire8/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
1344 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1362 "        <layer id=\"54\" name=\"fire8/concat\" precision=\"FP32\" type=\"Concat\">"
1363 "            <data axis=\"1\"/>"
1387 "        <layer id=\"55\" name=\"fire9/squeeze1x1\" precision=\"FP32\" type=\"Convolution\">"
1388 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"64\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1406 "                    <weights offset=\"2101248\" size=\"131072\"/>"
1407 "                    <biases offset=\"2232320\" size=\"256\"/>"
1410 "        <layer id=\"56\" name=\"fire9/relu_squeeze1x1\" precision=\"FP32\" type=\"ReLU\">"
1411 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1429 "        <layer id=\"57\" name=\"fire9/expand1x1\" precision=\"FP32\" type=\"Convolution\">"
1430 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"256\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1448 "                    <weights offset=\"2232576\" size=\"65536\"/>"
1449 "                    <biases offset=\"2298112\" size=\"1024\"/>"
1452 "        <layer id=\"58\" name=\"fire9/relu_expand1x1\" precision=\"FP32\" type=\"ReLU\">"
1453 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1471 "        <layer id=\"59\" name=\"fire9/expand3x3\" precision=\"FP32\" type=\"Convolution\">"
1472 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"3\" kernel-y=\"3\" output=\"256\" pad-x=\"1\" pad-y=\"1\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1490 "                    <weights offset=\"2299136\" size=\"589824\"/>"
1491 "                    <biases offset=\"2888960\" size=\"1024\"/>"
1494 "        <layer id=\"60\" name=\"fire9/relu_expand3x3\" precision=\"FP32\" type=\"ReLU\">"
1495 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1513 "        <layer id=\"61\" name=\"fire9/concat\" precision=\"FP32\" type=\"Concat\">"
1514 "            <data axis=\"1\"/>"
1538 "        <layer id=\"62\" name=\"conv10\" precision=\"FP32\" type=\"Convolution\">"
1539 "            <data dilation-x=\"1\" dilation-y=\"1\" group=\"1\" kernel-x=\"1\" kernel-y=\"1\" output=\"1000\" pad-x=\"0\" pad-y=\"0\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1557 "                    <weights offset=\"2889984\" size=\"2048000\"/>"
1558 "                    <biases offset=\"4937984\" size=\"4000\"/>"
1561 "        <layer id=\"63\" name=\"relu_conv10\" precision=\"FP32\" type=\"ReLU\">"
1562 "            <data engine=\"caffe.ReLUParameter.DEFAULT\" negative_slope=\"0.0\"/>"
1580 "        <layer id=\"64\" name=\"pool10\" precision=\"FP32\" type=\"Pooling\">"
1581 "            <data exclude-pad=\"false\" kernel-x=\"14\" kernel-y=\"14\" pad-x=\"0\" pad-y=\"0\" pool-method=\"avg\" rounding_type=\"ceil\" stride=\"1,1,1,1\" stride-x=\"1\" stride-y=\"1\"/>"
1599 "        <layer id=\"65\" name=\"prob\" precision=\"FP32\" type=\"SoftMax\">"
1600 "            <data axis=\"1\"/>"
// Edge list: a normal feed-forward SqueezeNet topology except for one edge below.
1620 "        <edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"0\"/>"
1621 "        <edge from-layer=\"1\" from-port=\"3\" to-layer=\"2\" to-port=\"0\"/>"
1622 "        <edge from-layer=\"2\" from-port=\"1\" to-layer=\"3\" to-port=\"0\"/>"
1623 "        <edge from-layer=\"3\" from-port=\"1\" to-layer=\"4\" to-port=\"0\"/>"
1624 "        <edge from-layer=\"4\" from-port=\"3\" to-layer=\"5\" to-port=\"0\"/>"
1625 "        <edge from-layer=\"5\" from-port=\"1\" to-layer=\"6\" to-port=\"0\"/>"
1626 "        <edge from-layer=\"6\" from-port=\"3\" to-layer=\"7\" to-port=\"0\"/>"
1627 "        <edge from-layer=\"5\" from-port=\"1\" to-layer=\"8\" to-port=\"0\"/>"
1628 "        <edge from-layer=\"8\" from-port=\"3\" to-layer=\"9\" to-port=\"0\"/>"
1629 "        <edge from-layer=\"7\" from-port=\"1\" to-layer=\"10\" to-port=\"0\"/>"
1630 "        <edge from-layer=\"9\" from-port=\"1\" to-layer=\"10\" to-port=\"1\"/>"
1631 "        <edge from-layer=\"10\" from-port=\"2\" to-layer=\"11\" to-port=\"0\"/>"
1632 "        <edge from-layer=\"11\" from-port=\"3\" to-layer=\"12\" to-port=\"0\"/>"
// This back-edge (11 -> 8) deliberately closes a cycle: 8 -> 9 -> 10 -> 11 -> 8.
// It is the defect the reader is expected to detect.
1633 "        <edge from-layer=\"11\" from-port=\"4\" to-layer=\"8\" to-port=\"1\"/>"
1634 "        <edge from-layer=\"12\" from-port=\"1\" to-layer=\"13\" to-port=\"0\"/>"
1635 "        <edge from-layer=\"13\" from-port=\"3\" to-layer=\"14\" to-port=\"0\"/>"
1636 "        <edge from-layer=\"12\" from-port=\"1\" to-layer=\"15\" to-port=\"0\"/>"
1637 "        <edge from-layer=\"15\" from-port=\"3\" to-layer=\"16\" to-port=\"0\"/>"
1638 "        <edge from-layer=\"14\" from-port=\"1\" to-layer=\"17\" to-port=\"0\"/>"
1639 "        <edge from-layer=\"16\" from-port=\"1\" to-layer=\"17\" to-port=\"1\"/>"
1640 "        <edge from-layer=\"17\" from-port=\"2\" to-layer=\"18\" to-port=\"0\"/>"
1641 "        <edge from-layer=\"18\" from-port=\"1\" to-layer=\"19\" to-port=\"0\"/>"
1642 "        <edge from-layer=\"19\" from-port=\"3\" to-layer=\"20\" to-port=\"0\"/>"
1643 "        <edge from-layer=\"20\" from-port=\"1\" to-layer=\"21\" to-port=\"0\"/>"
1644 "        <edge from-layer=\"21\" from-port=\"3\" to-layer=\"22\" to-port=\"0\"/>"
1645 "        <edge from-layer=\"20\" from-port=\"1\" to-layer=\"23\" to-port=\"0\"/>"
1646 "        <edge from-layer=\"23\" from-port=\"3\" to-layer=\"24\" to-port=\"0\"/>"
1647 "        <edge from-layer=\"22\" from-port=\"1\" to-layer=\"25\" to-port=\"0\"/>"
1648 "        <edge from-layer=\"24\" from-port=\"1\" to-layer=\"25\" to-port=\"1\"/>"
1649 "        <edge from-layer=\"25\" from-port=\"2\" to-layer=\"26\" to-port=\"0\"/>"
1650 "        <edge from-layer=\"26\" from-port=\"3\" to-layer=\"27\" to-port=\"0\"/>"
1651 "        <edge from-layer=\"27\" from-port=\"1\" to-layer=\"28\" to-port=\"0\"/>"
1652 "        <edge from-layer=\"28\" from-port=\"3\" to-layer=\"29\" to-port=\"0\"/>"
1653 "        <edge from-layer=\"27\" from-port=\"1\" to-layer=\"30\" to-port=\"0\"/>"
1654 "        <edge from-layer=\"30\" from-port=\"3\" to-layer=\"31\" to-port=\"0\"/>"
1655 "        <edge from-layer=\"29\" from-port=\"1\" to-layer=\"32\" to-port=\"0\"/>"
1656 "        <edge from-layer=\"31\" from-port=\"1\" to-layer=\"32\" to-port=\"1\"/>"
1657 "        <edge from-layer=\"32\" from-port=\"2\" to-layer=\"33\" to-port=\"0\"/>"
1658 "        <edge from-layer=\"33\" from-port=\"1\" to-layer=\"34\" to-port=\"0\"/>"
1659 "        <edge from-layer=\"34\" from-port=\"3\" to-layer=\"35\" to-port=\"0\"/>"
1660 "        <edge from-layer=\"35\" from-port=\"1\" to-layer=\"36\" to-port=\"0\"/>"
1661 "        <edge from-layer=\"36\" from-port=\"3\" to-layer=\"37\" to-port=\"0\"/>"
1662 "        <edge from-layer=\"35\" from-port=\"1\" to-layer=\"38\" to-port=\"0\"/>"
1663 "        <edge from-layer=\"38\" from-port=\"3\" to-layer=\"39\" to-port=\"0\"/>"
1664 "        <edge from-layer=\"37\" from-port=\"1\" to-layer=\"40\" to-port=\"0\"/>"
1665 "        <edge from-layer=\"39\" from-port=\"1\" to-layer=\"40\" to-port=\"1\"/>"
1666 "        <edge from-layer=\"40\" from-port=\"2\" to-layer=\"41\" to-port=\"0\"/>"
1667 "        <edge from-layer=\"41\" from-port=\"3\" to-layer=\"42\" to-port=\"0\"/>"
1668 "        <edge from-layer=\"42\" from-port=\"1\" to-layer=\"43\" to-port=\"0\"/>"
1669 "        <edge from-layer=\"43\" from-port=\"3\" to-layer=\"44\" to-port=\"0\"/>"
1670 "        <edge from-layer=\"42\" from-port=\"1\" to-layer=\"45\" to-port=\"0\"/>"
1671 "        <edge from-layer=\"45\" from-port=\"3\" to-layer=\"46\" to-port=\"0\"/>"
1672 "        <edge from-layer=\"44\" from-port=\"1\" to-layer=\"47\" to-port=\"0\"/>"
1673 "        <edge from-layer=\"46\" from-port=\"1\" to-layer=\"47\" to-port=\"1\"/>"
1674 "        <edge from-layer=\"47\" from-port=\"2\" to-layer=\"48\" to-port=\"0\"/>"
1675 "        <edge from-layer=\"48\" from-port=\"3\" to-layer=\"49\" to-port=\"0\"/>"
1676 "        <edge from-layer=\"49\" from-port=\"1\" to-layer=\"50\" to-port=\"0\"/>"
1677 "        <edge from-layer=\"50\" from-port=\"3\" to-layer=\"51\" to-port=\"0\"/>"
1678 "        <edge from-layer=\"49\" from-port=\"1\" to-layer=\"52\" to-port=\"0\"/>"
1679 "        <edge from-layer=\"52\" from-port=\"3\" to-layer=\"53\" to-port=\"0\"/>"
1680 "        <edge from-layer=\"51\" from-port=\"1\" to-layer=\"54\" to-port=\"0\"/>"
1681 "        <edge from-layer=\"53\" from-port=\"1\" to-layer=\"54\" to-port=\"1\"/>"
1682 "        <edge from-layer=\"54\" from-port=\"2\" to-layer=\"55\" to-port=\"0\"/>"
1683 "        <edge from-layer=\"55\" from-port=\"3\" to-layer=\"56\" to-port=\"0\"/>"
1684 "        <edge from-layer=\"56\" from-port=\"1\" to-layer=\"57\" to-port=\"0\"/>"
1685 "        <edge from-layer=\"57\" from-port=\"3\" to-layer=\"58\" to-port=\"0\"/>"
1686 "        <edge from-layer=\"56\" from-port=\"1\" to-layer=\"59\" to-port=\"0\"/>"
1687 "        <edge from-layer=\"59\" from-port=\"3\" to-layer=\"60\" to-port=\"0\"/>"
1688 "        <edge from-layer=\"58\" from-port=\"1\" to-layer=\"61\" to-port=\"0\"/>"
1689 "        <edge from-layer=\"60\" from-port=\"1\" to-layer=\"61\" to-port=\"1\"/>"
1690 "        <edge from-layer=\"61\" from-port=\"2\" to-layer=\"62\" to-port=\"0\"/>"
1691 "        <edge from-layer=\"62\" from-port=\"3\" to-layer=\"63\" to-port=\"0\"/>"
1692 "        <edge from-layer=\"63\" from-port=\"1\" to-layer=\"64\" to-port=\"0\"/>"
1693 "        <edge from-layer=\"64\" from-port=\"1\" to-layer=\"65\" to-port=\"0\"/>"
// Use the real V2 parser (not the mock): the cycle check lives in the
// reading/parsing path, and the cyclic graph must make it fail.
1696 CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
1698 ASSERT_EQ(GENERAL_ERROR, reader.ReadNetwork(model.data(), model.length(), &resp));
// Reads an IR v3 model with a single 3D Convolution and checks that the
// comma-separated "kernel"/"strides"/"dilations"/"pads_*" attributes are
// parsed into per-axis fields. Attribute order in the XML is "1,3,5", i.e.
// outermost-to-innermost, so Z=1, Y=3, X=5 after parsing.
// NOTE(review): source is sampled — port definitions and the closing lines
// of the test are not visible here.
1701 TEST_F(CNNNetReaderImplTest, canRead3DConvolution) {
1703 "<net batch=\"1\" name=\"Convolution_only\" version=\"3\">"
1705 "        <layer id=\"0\" name=\"1\" precision=\"FP32\" type=\"Input\">"
1716 "        <layer id=\"1\" name=\"3D_conv\" precision=\"FP32\" type=\"Convolution\">"
1717 "            <data dilations=\"1,3,5\" group=\"1\" kernel=\"1,3,5\" output=\"64\" pads_begin=\"1,3,5\" pads_end=\"1,3,5\" strides=\"1,3,5\"/>"
1737 "                    <weights offset=\"0\" size=\"263424\"/>"
1738 "                    <biases offset=\"263424\" size=\"256\"/>"
1743 "        <edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"0\"/>"
1747 CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
1748 ASSERT_EQ(OK, reader.ReadNetwork(model.data(), model.length(), &resp));
1750 auto network = reader.getNetwork(&resp);
1753 ASSERT_EQ(OK, network->getLayerByName("3D_conv", layer, nullptr));
// The generic layer must downcast to ConvolutionLayer for the v3 conv node.
1754 auto* conv = dynamic_cast<ConvolutionLayer*>(layer.get());
1755 ASSERT_NE(nullptr, conv);
// "1,3,5" in the XML maps to Z=1, Y=3, X=5 for every per-axis property.
1756 ASSERT_EQ(conv->_kernel[X_AXIS], 5);
1757 ASSERT_EQ(conv->_kernel[Y_AXIS], 3);
1758 ASSERT_EQ(conv->_kernel[Z_AXIS], 1);
1759 ASSERT_EQ(conv->_dilation[X_AXIS], 5);
1760 ASSERT_EQ(conv->_dilation[Y_AXIS], 3);
1761 ASSERT_EQ(conv->_dilation[Z_AXIS], 1);
1762 ASSERT_EQ(conv->_stride[X_AXIS], 5);
1763 ASSERT_EQ(conv->_stride[Y_AXIS], 3);
1764 ASSERT_EQ(conv->_stride[Z_AXIS], 1);
// pads_begin feeds _padding; pads_end feeds _pads_end.
1765 ASSERT_EQ(conv->_padding[X_AXIS], 5);
1766 ASSERT_EQ(conv->_padding[Y_AXIS], 3);
1767 ASSERT_EQ(conv->_padding[Z_AXIS], 1);
1768 ASSERT_EQ(conv->_pads_end[X_AXIS], 5);
1769 ASSERT_EQ(conv->_pads_end[Y_AXIS], 3);
1770 ASSERT_EQ(conv->_pads_end[Z_AXIS], 1);
// Same scenario as canRead3DConvolution, but for a 3D Pooling layer:
// "kernel"/"strides"/"pads_begin"/"pads_end" lists "1,3,5" must be parsed
// into Z=1, Y=3, X=5 on the resulting PoolingLayer.
// NOTE(review): source is sampled — port definitions and the closing lines
// of the test are not visible here.
1773 TEST_F(CNNNetReaderImplTest, canRead3DPooling) {
1775 "<net batch=\"1\" name=\"Pooling_only\" version=\"3\">"
1777 "        <layer id=\"0\" name=\"1\" precision=\"FP32\" type=\"Input\">"
1788 "        <layer id=\"1\" name=\"3D_pooling\" precision=\"FP32\" type=\"Pooling\">"
1789 "            <data exclude-pad=\"true\" kernel=\"1,3,5\" pads_begin=\"1,3,5\" pads_end=\"1,3,5\" pool-method=\"max\" rounding_type=\"ceil\" strides=\"1,3,5\"/>"
1811 "        <edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"0\"/>"
1815 CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
1816 ASSERT_EQ(OK, reader.ReadNetwork(model.data(), model.length(), &resp));
1818 auto network = reader.getNetwork(&resp);
1822 ASSERT_EQ(OK, network->getLayerByName("3D_pooling", layer, nullptr));
1823 auto* pool = dynamic_cast<PoolingLayer*>(layer.get());
1824 ASSERT_NE(nullptr, pool);
// "1,3,5" in the XML maps to Z=1, Y=3, X=5 for kernel/stride/paddings.
1825 ASSERT_EQ(pool->_kernel[X_AXIS], 5);
1826 ASSERT_EQ(pool->_kernel[Y_AXIS], 3);
1827 ASSERT_EQ(pool->_kernel[Z_AXIS], 1);
1828 ASSERT_EQ(pool->_stride[X_AXIS], 5);
1829 ASSERT_EQ(pool->_stride[Y_AXIS], 3);
1830 ASSERT_EQ(pool->_stride[Z_AXIS], 1);
1831 ASSERT_EQ(pool->_padding[X_AXIS], 5);
1832 ASSERT_EQ(pool->_padding[Y_AXIS], 3);
1833 ASSERT_EQ(pool->_padding[Z_AXIS], 1);
1834 ASSERT_EQ(pool->_pads_end[X_AXIS], 5);
1835 ASSERT_EQ(pool->_pads_end[Y_AXIS], 3);
1836 ASSERT_EQ(pool->_pads_end[Z_AXIS], 1);
// Negative test: a v2 model whose only layer is a Split (1 input, 2 outputs)
// with no Input layer at all must be rejected — ReadNetwork() returns
// GENERAL_ERROR. (Test name "_1to2" refers to the layer's port fan-out.)
// NOTE(review): source is sampled — the ports section of the XML and the
// test's closing brace are not visible here.
1839 TEST_F(CNNNetReaderImplTest, canParseWithoutInput_1to2) {
1840 std::string model = R"V0G0N(
1841 <net batch="1" name="SimpleNet" version="2">
1843         <layer id="1" name="Boo" precision="FP32" type="Split">
1844             <data operation="sum"/>
// Real V2 parser: the missing-Input check is in the production parsing path.
1866 CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
1867 sts = reader.ReadNetwork(model.data(), model.length(), &resp);
1868 ASSERT_EQ(GENERAL_ERROR, sts) << resp.msg;
// Negative test, mirror of the previous one: a v2 model whose only layer is
// an Eltwise (2 inputs, 1 output) and no Input layer must be rejected with
// GENERAL_ERROR.
// NOTE(review): source is sampled — the ports section of the XML and the
// test's closing brace are not visible here.
1871 TEST_F(CNNNetReaderImplTest, canParseWithoutInput_2to1) {
1872 std::string model = R"V0G0N(
1873 <net batch="1" name="SimpleNet" version="2">
1875         <layer id="1" name="Foo" precision="FP32" type="Eltwise">
1876             <data operation="sum"/>
1898 CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
1899 sts = reader.ReadNetwork(model.data(), model.length(), &resp);
1900 ASSERT_EQ(GENERAL_ERROR, sts) << resp.msg;
// Parses an IR v4 model containing a TensorIterator with a small body
// (reshape -> eltwise-sum -> reshape) and verifies the TI's parsed metadata:
// input/output port maps, default slicing rule fields, and the back edge.
// NOTE(review): source is sampled — ports sections of the XML and several
// closing tags/braces are not visible here.
1903 TEST_F(CNNNetReaderImplTest, canParseSimpleTI) {
1904 std::string model = R"V0G0N(
1905 <net batch="1" name="Simple_TI" version="4">
1907         <layer id="0" name="input" precision="FP32" type="Input">
1916         <layer id="1" name="Bias" precision="FP32" type="Const">
1924                 <custom offset="0" size="64"/>
1927         <layer id="2" name="SomeTI" precision="FP32" type="TensorIterator">
1947                 <input external_port_id="0" internal_layer_id="0" internal_port_id="0" axis="1" />
1948                 <input external_port_id="1" internal_layer_id="1" internal_port_id="1"/>
1949                 <output external_port_id="3" internal_layer_id="2" internal_port_id="1" axis="1" />
1952                 <edge from-layer="1" from-port="2" to-layer="1" to-port="1"/>
1956                 <layer id="0" name="TI_reshape_in" precision="FP32" type="Reshape">
1957                     <data axis="0" dim="1,512" num_axes="-1"/>
1972                 <layer id="1" name="TI_sum" precision="FP32" type="Eltwise">
1973                     <data operation="sum"/>
1991                 <layer id="2" name="TI_reshape_out" precision="FP32" type="Reshape">
1992                     <data axis="0" dim="1,1,256" num_axes="-1"/>
2009                 <edge from-layer="0" from-port="1" to-layer="1" to-port="0"/>
2010                 <edge from-layer="1" from-port="2" to-layer="2" to-port="0"/>
2016         <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
2017         <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
2022 CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
2023 sts = reader.ReadNetwork(model.data(), model.length(), &resp);
2024 ASSERT_EQ(OK, sts) << resp.msg;
2026 auto network = reader.getNetwork(&resp);
2027 ASSERT_NE(nullptr, network) << resp.msg;
2030 sts = network->getLayerByName("SomeTI", layer, &resp);
2031 ASSERT_EQ(OK, sts) << resp.msg;
2033 auto* ti = dynamic_cast<TensorIterator*>(layer.get());
2034 ASSERT_NE(nullptr, ti);
2035 ASSERT_EQ(ti->type, "TensorIterator");
2037 // Check Input port mapping
2038 ASSERT_EQ(ti->input_port_map.size(), 2);
// Port-map order is not guaranteed; pick out the sliced entry (axis == 1)
// so the assertions are order-independent.
2039 int i = ti->input_port_map[0].axis == 1 ? 0 : 1;
// Sliced input: axis 1 with the default rule stride=1, start=0, end=-1, part_size=1.
2040 ASSERT_EQ(ti->input_port_map[i].axis, 1);
2041 ASSERT_EQ(ti->input_port_map[i].stride, 1);
2042 ASSERT_EQ(ti->input_port_map[i].start, 0);
2043 ASSERT_EQ(ti->input_port_map[i].end, -1);
2044 ASSERT_EQ(ti->input_port_map[i].part_size, 1);
// Non-sliced input (no axis attribute in XML): axis defaults to -1.
2045 ASSERT_EQ(ti->input_port_map[1 - i].axis, -1);
2046 ASSERT_EQ(ti->input_port_map[1 - i].stride, 1);
2047 ASSERT_EQ(ti->input_port_map[1 - i].start, 0);
2048 ASSERT_EQ(ti->input_port_map[1 - i].end, -1);
2049 ASSERT_EQ(ti->input_port_map[1 - i].part_size, 1);
2051 // Check Output port mapping
2052 ASSERT_EQ(ti->output_port_map.size(), 1);
2053 ASSERT_EQ(ti->output_port_map[0].axis, 1)
2054 ASSERT_EQ(ti->output_port_map[0].stride, 1);
2055 ASSERT_EQ(ti->output_port_map[0].start, 0);
2056 ASSERT_EQ(ti->output_port_map[0].end, -1);
2057 ASSERT_EQ(ti->output_port_map[0].part_size, 1);
// The single back edge (layer 1 port 2 -> layer 1 port 1 in the XML above)
// carries state between iterations; no slicing, so axis stays -1.
2060 ASSERT_EQ(ti->back_edges.size(), 1);
2061 ASSERT_EQ(ti->back_edges[0].from, 0);
2062 ASSERT_EQ(ti->back_edges[0].to, 1);
2063 ASSERT_EQ(ti->back_edges[0].axis, -1);
2064 ASSERT_EQ(ti->back_edges[0].stride, 1);
2065 ASSERT_EQ(ti->back_edges[0].start, 0);
2066 ASSERT_EQ(ti->back_edges[0].end, -1);
2067 ASSERT_EQ(ti->back_edges[0].part_size, 1);
// Parses a model with a 4-byte Const layer that has no dims (a scalar) and
// verifies that after SetWeights(): the Const blob carries the written value,
// and its output TensorDesc has empty dims, SCALAR layout, FP32 precision.
// NOTE(review): source is sampled — ports sections of the XML, the layer
// declaration, and the closing brace are not visible here.
2070 TEST_F(CNNNetReaderImplTest, canParseScalar) {
2071 std::string model = R"V0G0N(
2072 <net batch="1" name="SimpleNet" version="2">
2074         <layer id="0" name="input" precision="FP32" type="Input">
2083         <layer id="1" name="scalar" precision="FP32" type="Const">
2088                 <custom offset="0" size="4"/>
2091         <layer id="2" name="reshape" precision="FP32" type="Reshape">
2108         <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
2109         <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
2114 CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
2115 sts = reader.ReadNetwork(model.data(), model.length(), &resp);
2116 ASSERT_EQ(OK, sts) << resp.msg;
// Weights blob is declared as 4 raw U8 bytes; the buffer is then aliased as
// float* so those 4 bytes hold exactly one FP32 value (the scalar constant).
2117 auto blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {4}, Layout::C));
2119 auto buffer = blob->buffer().as<float*>();
2120 float SCALAR_VALUE = 90;
2121 buffer[0] = SCALAR_VALUE;
2123 sts = reader.SetWeights(blob, &resp);
2124 ASSERT_EQ(OK, sts) << resp.msg;
2126 auto net = reader.getNetwork(&resp);
2128 ASSERT_NE(nullptr, net) << resp.msg;
2130 sts = net->getLayerByName("scalar", layer, &resp);
2131 ASSERT_EQ(OK, sts) << resp.msg;
2132 ASSERT_NE(nullptr, layer.get());
2133 ASSERT_EQ(layer->type, "Const");
// The Const layer's (only) blob must contain the value written above, and
// its output descriptor must describe a true scalar.
2134 auto actualBlob = layer->blobs.begin()->second;
2135 ASSERT_EQ(actualBlob->buffer().as<float*>()[0], SCALAR_VALUE);
2136 auto scalarDesc = layer->outData[0]->getTensorDesc();
2137 ASSERT_TRUE(scalarDesc.getDims().empty());
2138 ASSERT_EQ(scalarDesc.getLayout(), SCALAR);
2139 ASSERT_EQ(scalarDesc.getPrecision(), Precision::FP32);