-// Copyright (C) 2018 Intel Corporation
+// Copyright (C) 2018-2019 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//
#include <gmock/gmock-more-actions.h>
#include "cnn_network_impl.hpp"
#include "mock_iformat_parser.hpp"
+#include <test_assertions.hpp>
+#include <single_layer_common.hpp>
using namespace testing;
using namespace InferenceEngine;
// Builds the creator with a single shared MockIFormatParser so tests can
// set gmock expectations on _parser before the reader asks for a parser.
MockFormatParserCreator() {
    _parser = make_shared<MockIFormatParser>();
}
+
// Hands back the pre-built mock parser. NOTE(review): the requested IR
// 'version' is deliberately ignored — every version gets the same mock.
std::shared_ptr<IFormatParser> create(int version) override {
    return _parser;
}
TEST_F(CNNNetReaderImplTest, canRead3DConvolution) {
std::string model =
- "<net batch=\"1\" name=\"Convolution_only\" version=\"3\">"
- " <layers>"
- " <layer id=\"0\" name=\"1\" precision=\"FP32\" type=\"Input\">"
- " <output>"
- " <port id=\"0\">"
- " <dim>1</dim>"
- " <dim>3</dim>"
- " <dim>16</dim>"
- " <dim>112</dim>"
- " <dim>112</dim>"
- " </port>"
- " </output>"
- " </layer>"
- " <layer id=\"1\" name=\"3D_conv\" precision=\"FP32\" type=\"Convolution\">"
- " <data dilations=\"1,3,5\" group=\"1\" kernel=\"1,3,5\" output=\"64\" pads_begin=\"1,3,5\" pads_end=\"1,3,5\" strides=\"1,3,5\"/>"
- " <input>"
- " <port id=\"0\">"
- " <dim>1</dim>"
- " <dim>3</dim>"
- " <dim>16</dim>"
- " <dim>112</dim>"
- " <dim>112</dim>"
- " </port>"
- " </input>"
- " <output>"
- " <port id=\"1\">"
- " <dim>1</dim>"
- " <dim>64</dim>"
- " <dim>16</dim>"
- " <dim>56</dim>"
- " <dim>56</dim>"
- " </port>"
- " </output>"
- " <blobs>"
- " <weights offset=\"0\" size=\"263424\"/>"
- " <biases offset=\"263424\" size=\"256\"/>"
- " </blobs>"
- " </layer>"
- " </layers>"
- " <edges>"
- " <edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"0\"/>"
- " </edges>"
- "</net>";
+ "<net batch=\"1\" name=\"Convolution_only\" version=\"3\">"
+ " <layers>"
+ " <layer id=\"0\" name=\"1\" precision=\"FP32\" type=\"Input\">"
+ " <output>"
+ " <port id=\"0\">"
+ " <dim>1</dim>"
+ " <dim>3</dim>"
+ " <dim>16</dim>"
+ " <dim>112</dim>"
+ " <dim>112</dim>"
+ " </port>"
+ " </output>"
+ " </layer>"
+ " <layer id=\"1\" name=\"3D_conv\" precision=\"FP32\" type=\"Convolution\">"
+ " <data dilations=\"1,3,5\" group=\"1\" kernel=\"1,3,5\" output=\"64\" pads_begin=\"1,3,5\" pads_end=\"1,3,5\" strides=\"1,3,5\"/>"
+ " <input>"
+ " <port id=\"0\">"
+ " <dim>1</dim>"
+ " <dim>3</dim>"
+ " <dim>16</dim>"
+ " <dim>112</dim>"
+ " <dim>112</dim>"
+ " </port>"
+ " </input>"
+ " <output>"
+ " <port id=\"1\">"
+ " <dim>1</dim>"
+ " <dim>64</dim>"
+ " <dim>16</dim>"
+ " <dim>56</dim>"
+ " <dim>56</dim>"
+ " </port>"
+ " </output>"
+ " <blobs>"
+ " <weights offset=\"0\" size=\"263424\"/>"
+ " <biases offset=\"263424\" size=\"256\"/>"
+ " </blobs>"
+ " </layer>"
+ " </layers>"
+ " <edges>"
+ " <edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"0\"/>"
+ " </edges>"
+ "</net>";
CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
ASSERT_EQ(OK, reader.ReadNetwork(model.data(), model.length(), &resp));
CNNLayerPtr layer;
ASSERT_EQ(OK, network->getLayerByName("3D_conv", layer, nullptr));
- auto *conv = dynamic_cast<ConvolutionLayer *>(layer.get());
+ auto* conv = dynamic_cast<ConvolutionLayer*>(layer.get());
ASSERT_NE(nullptr, conv);
ASSERT_EQ(conv->_kernel[X_AXIS], 5);
ASSERT_EQ(conv->_kernel[Y_AXIS], 3);
TEST_F(CNNNetReaderImplTest, canRead3DPooling) {
std::string model =
- "<net batch=\"1\" name=\"Pooling_only\" version=\"3\">"
- " <layers>"
- " <layer id=\"0\" name=\"1\" precision=\"FP32\" type=\"Input\">"
- " <output>"
- " <port id=\"0\">"
- " <dim>1</dim>"
- " <dim>3</dim>"
- " <dim>16</dim>"
- " <dim>112</dim>"
- " <dim>112</dim>"
- " </port>"
- " </output>"
- " </layer>"
- " <layer id=\"1\" name=\"3D_pooling\" precision=\"FP32\" type=\"Pooling\">"
- " <data exclude-pad=\"true\" kernel=\"1,3,5\" pads_begin=\"1,3,5\" pads_end=\"1,3,5\" pool-method=\"max\" rounding_type=\"ceil\" strides=\"1,3,5\"/>"
- " <input>"
- " <port id=\"0\">"
- " <dim>1</dim>"
- " <dim>3</dim>"
- " <dim>16</dim>"
- " <dim>112</dim>"
- " <dim>112</dim>"
- " </port>"
- " </input>"
- " <output>"
- " <port id=\"1\">"
- " <dim>1</dim>"
- " <dim>64</dim>"
- " <dim>8</dim>"
- " <dim>28</dim>"
- " <dim>28</dim>"
- " </port>"
- " </output>"
- " </layer>"
- " </layers>"
- " <edges>"
- " <edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"0\"/>"
- " </edges>"
- "</net>";
+ "<net batch=\"1\" name=\"Pooling_only\" version=\"3\">"
+ " <layers>"
+ " <layer id=\"0\" name=\"1\" precision=\"FP32\" type=\"Input\">"
+ " <output>"
+ " <port id=\"0\">"
+ " <dim>1</dim>"
+ " <dim>3</dim>"
+ " <dim>16</dim>"
+ " <dim>112</dim>"
+ " <dim>112</dim>"
+ " </port>"
+ " </output>"
+ " </layer>"
+ " <layer id=\"1\" name=\"3D_pooling\" precision=\"FP32\" type=\"Pooling\">"
+ " <data exclude-pad=\"true\" kernel=\"1,3,5\" pads_begin=\"1,3,5\" pads_end=\"1,3,5\" pool-method=\"max\" rounding_type=\"ceil\" strides=\"1,3,5\"/>"
+ " <input>"
+ " <port id=\"0\">"
+ " <dim>1</dim>"
+ " <dim>3</dim>"
+ " <dim>16</dim>"
+ " <dim>112</dim>"
+ " <dim>112</dim>"
+ " </port>"
+ " </input>"
+ " <output>"
+ " <port id=\"1\">"
+ " <dim>1</dim>"
+ " <dim>64</dim>"
+ " <dim>8</dim>"
+ " <dim>28</dim>"
+ " <dim>28</dim>"
+ " </port>"
+ " </output>"
+ " </layer>"
+ " </layers>"
+ " <edges>"
+ " <edge from-layer=\"0\" from-port=\"0\" to-layer=\"1\" to-port=\"0\"/>"
+ " </edges>"
+ "</net>";
CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
ASSERT_EQ(OK, reader.ReadNetwork(model.data(), model.length(), &resp));
CNNLayerPtr layer;
ASSERT_EQ(OK, network->getLayerByName("3D_pooling", layer, nullptr));
- auto *pool = dynamic_cast<PoolingLayer *>(layer.get());
+ auto* pool = dynamic_cast<PoolingLayer*>(layer.get());
ASSERT_NE(nullptr, pool);
ASSERT_EQ(pool->_kernel[X_AXIS], 5);
ASSERT_EQ(pool->_kernel[Y_AXIS], 3);
CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
sts = reader.ReadNetwork(model.data(), model.length(), &resp);
- ASSERT_EQ(OK, sts) << resp.msg;
-
- auto net = reader.getNetwork(&resp);
- ASSERT_NE(nullptr, net ) << resp.msg;
-
- InputsDataMap in_map;
- OutputsDataMap out_map;
- net->getInputsInfo(in_map);
- net->getOutputsInfo(out_map);
-
- ASSERT_EQ(in_map.size(), 1); auto i = in_map.begin();
- ASSERT_EQ(i++->second->name(), "Boo");
-
- ASSERT_EQ(out_map.size(), 2); auto o = out_map.begin();
- ASSERT_EQ(o++->second->getName(), "Boo.0");
- ASSERT_EQ(o++->second->getName(), "Boo.1");
+ ASSERT_EQ(GENERAL_ERROR, sts) << resp.msg;
}
TEST_F(CNNNetReaderImplTest, canParseWithoutInput_2to1) {
CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
sts = reader.ReadNetwork(model.data(), model.length(), &resp);
- ASSERT_EQ(OK, sts) << resp.msg;
-
- auto net = reader.getNetwork(&resp);
- ASSERT_NE(nullptr, net ) << resp.msg;
-
- InputsDataMap in_map;
- OutputsDataMap out_map;
- net->getInputsInfo(in_map);
- net->getOutputsInfo(out_map);
-
- ASSERT_EQ(in_map.size(), 2); auto i = in_map.begin();
- ASSERT_EQ(i++->second->name(), "Foo.0");
- ASSERT_EQ(i++->second->name(), "Foo.1");
-
- ASSERT_EQ(out_map.size(), 1); auto o = out_map.begin();
- ASSERT_EQ(o++->second->getName(), "Foo");
+ ASSERT_EQ(GENERAL_ERROR, sts) << resp.msg;
}
TEST_F(CNNNetReaderImplTest, canParseSimpleTI) {
- std::string model = R"V0G0N(
+ std::string model = R"V0G0N(
<net batch="1" name="Simple_TI" version="4">
<layers>
<layer id="0" name="input" precision="FP32" type="Input">
</net>
)V0G0N";
- CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
- sts = reader.ReadNetwork(model.data(), model.length(), &resp);
- ASSERT_EQ(OK, sts) << resp.msg;
+ CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
+ sts = reader.ReadNetwork(model.data(), model.length(), &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
- auto network = reader.getNetwork(&resp);
- ASSERT_NE(nullptr, network ) << resp.msg;
+ auto network = reader.getNetwork(&resp);
+ ASSERT_NE(nullptr, network) << resp.msg;
- CNNLayerPtr layer;
- sts = network->getLayerByName("SomeTI", layer, &resp);
- ASSERT_EQ(OK, sts) << resp.msg;
+ CNNLayerPtr layer;
+ sts = network->getLayerByName("SomeTI", layer, &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
+
+ auto* ti = dynamic_cast<TensorIterator*>(layer.get());
+ ASSERT_NE(nullptr, ti);
+ ASSERT_EQ(ti->type, "TensorIterator");
+
+ // Check Input port mapping
+ ASSERT_EQ(ti->input_port_map.size(), 2);
+ int i = ti->input_port_map[0].axis == 1 ? 0 : 1;
+ ASSERT_EQ(ti->input_port_map[i].axis, 1);
+ ASSERT_EQ(ti->input_port_map[i].stride, 1);
+ ASSERT_EQ(ti->input_port_map[i].start, 0);
+ ASSERT_EQ(ti->input_port_map[i].end, -1);
+ ASSERT_EQ(ti->input_port_map[i].part_size, 1);
+ ASSERT_EQ(ti->input_port_map[1 - i].axis, -1);
+ ASSERT_EQ(ti->input_port_map[1 - i].stride, 1);
+ ASSERT_EQ(ti->input_port_map[1 - i].start, 0);
+ ASSERT_EQ(ti->input_port_map[1 - i].end, -1);
+ ASSERT_EQ(ti->input_port_map[1 - i].part_size, 1);
+
+ // Check Output port mapping
+ ASSERT_EQ(ti->output_port_map.size(), 1);
+ ASSERT_EQ(ti->output_port_map[0].axis, 1);
+ ASSERT_EQ(ti->output_port_map[0].stride, 1);
+ ASSERT_EQ(ti->output_port_map[0].start, 0);
+ ASSERT_EQ(ti->output_port_map[0].end, -1);
+ ASSERT_EQ(ti->output_port_map[0].part_size, 1);
+
+    // Check back edges: exactly one is expected (from port 0 to port 1)
+ ASSERT_EQ(ti->back_edges.size(), 1);
+ ASSERT_EQ(ti->back_edges[0].from, 0);
+ ASSERT_EQ(ti->back_edges[0].to, 1);
+ ASSERT_EQ(ti->back_edges[0].axis, -1);
+ ASSERT_EQ(ti->back_edges[0].stride, 1);
+ ASSERT_EQ(ti->back_edges[0].start, 0);
+ ASSERT_EQ(ti->back_edges[0].end, -1);
+ ASSERT_EQ(ti->back_edges[0].part_size, 1);
+}
+
+TEST_F(CNNNetReaderImplTest, canParseScalar) {
+ std::string model = R"V0G0N(
+<net batch="1" name="SimpleNet" version="2">
+ <layers>
+ <layer id="0" name="input" precision="FP32" type="Input">
+ <output>
+ <port id="0">
+ <dim>1</dim>
+ <dim>5</dim>
+ <dim>16</dim>
+ </port>
+ </output>
+ </layer>
+ <layer id="1" name="scalar" precision="FP32" type="Const">
+ <output>
+ <port id="0"/>
+ </output>
+ <blobs>
+ <custom offset="0" size="4"/>
+ </blobs>
+ </layer>
+ <layer id="2" name="reshape" precision="FP32" type="Reshape">
+ <input>
+ <port id="0">
+ <dim>1</dim>
+ <dim>5</dim>
+ <dim>16</dim>
+ </port>
+ <port id="1"/>
+ </input>
+ <output>
+ <port id="2">
+ <dim>90</dim>
+ </port>
+ </output>
+ </layer>
+ </layers>
+ <edges>
+ <edge from-layer="0" from-port="0" to-layer="2" to-port="0"/>
+ <edge from-layer="1" from-port="0" to-layer="2" to-port="1"/>
+ </edges>
+</net>
+ )V0G0N";
- auto *ti = dynamic_cast<TensorIterator*>(layer.get());
- ASSERT_NE(nullptr, ti);
- ASSERT_EQ(ti->type, "TensorIterator");
+ CNNNetReaderImpl reader(make_shared<V2FormatParserCreator>());
+ sts = reader.ReadNetwork(model.data(), model.length(), &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
+ auto blob = make_shared_blob<uint8_t>(TensorDesc(Precision::U8, {4}, Layout::C));
+ blob->allocate();
+ auto buffer = blob->buffer().as<float*>();
+ float SCALAR_VALUE = 90;
+ buffer[0] = SCALAR_VALUE;
- // Check Input port mapping
- ASSERT_EQ(ti->input_port_map.size(), 2);
- int i = ti->input_port_map[0].axis == 1 ? 0 : 1;
- ASSERT_EQ(ti->input_port_map[i].axis, 1);
- ASSERT_EQ(ti->input_port_map[i].stride, 1);
- ASSERT_EQ(ti->input_port_map[i].start, 0);
- ASSERT_EQ(ti->input_port_map[i].end, -1);
- ASSERT_EQ(ti->input_port_map[i].part_size, 1);
- ASSERT_EQ(ti->input_port_map[1-i].axis, -1);
- ASSERT_EQ(ti->input_port_map[1-i].stride, 1);
- ASSERT_EQ(ti->input_port_map[1-i].start, 0);
- ASSERT_EQ(ti->input_port_map[1-i].end, -1);
- ASSERT_EQ(ti->input_port_map[1-i].part_size, 1);
+ sts = reader.SetWeights(blob, &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
- // Check Output port mapping
- ASSERT_EQ(ti->output_port_map.size(), 1);
- ASSERT_EQ(ti->output_port_map[0].axis, 1);
- ASSERT_EQ(ti->output_port_map[0].stride, 1);
- ASSERT_EQ(ti->output_port_map[0].start, 0);
- ASSERT_EQ(ti->output_port_map[0].end, -1);
- ASSERT_EQ(ti->output_port_map[0].part_size, 1);
+ auto net = reader.getNetwork(&resp);
- // No back edges
- ASSERT_EQ(ti->back_edges.size(), 1);
- ASSERT_EQ(ti->back_edges[0].from, 0);
- ASSERT_EQ(ti->back_edges[0].to, 1);
- ASSERT_EQ(ti->back_edges[0].axis, -1);
- ASSERT_EQ(ti->back_edges[0].stride, 1);
- ASSERT_EQ(ti->back_edges[0].start, 0);
- ASSERT_EQ(ti->back_edges[0].end, -1);
- ASSERT_EQ(ti->back_edges[0].part_size, 1);
+ ASSERT_NE(nullptr, net) << resp.msg;
+ CNNLayerPtr layer;
+ sts = net->getLayerByName("scalar", layer, &resp);
+ ASSERT_EQ(OK, sts) << resp.msg;
+ ASSERT_NE(nullptr, layer.get());
+ ASSERT_EQ(layer->type, "Const");
+ auto actualBlob = layer->blobs.begin()->second;
+ ASSERT_EQ(actualBlob->buffer().as<float*>()[0], SCALAR_VALUE);
+ auto scalarDesc = layer->outData[0]->getTensorDesc();
+ ASSERT_TRUE(scalarDesc.getDims().empty());
+ ASSERT_EQ(scalarDesc.getLayout(), SCALAR);
+ ASSERT_EQ(scalarDesc.getPrecision(), Precision::FP32);
}