NNXSW-1826 Move tests for Optimization classes to separate files
author: Rob Hughes <robert.hughes@arm.com>
Tue, 24 Sep 2019 08:34:53 +0000 (09:34 +0100)
committer: Jim Flynn Arm <jim.flynn@arm.com>
Wed, 25 Sep 2019 02:44:48 +0000 (02:44 +0000)
This splits up the >1000 line OptimizerTests.cpp file.

Each Optimization class now has its own test file, all of which are in a
subfolder of tests called "optimizations".

The original OptimizerTests.cpp now contains mostly (possibly entirely)
tests that validate output shapes; these could later be moved to
test files specific to the layer types they exercise.

Change-Id: Icd1196cad8b720abcb156921aab1adbd4026756b
Signed-off-by: Rob Hughes <robert.hughes@arm.com>
13 files changed:
CMakeLists.txt
src/armnn/test/OptimizerTests.cpp
src/armnn/test/TestUtils.hpp
src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/InsertDebugLayerTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/MovePermuteUpTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/PermuteAsReshapeTests.cpp [new file with mode: 0644]
src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp [new file with mode: 0644]

index 3da7e8b..65efe12 100644 (file)
@@ -550,6 +550,16 @@ if(BUILD_UNIT_TESTS)
         src/armnn/test/NetworkTests.cpp
         src/armnn/test/ObservableTest.cpp
         src/armnn/test/OptimizerTests.cpp
+        src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+        src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+        src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+        src/armnn/test/optimizations/InsertDebugLayerTests.cpp
+        src/armnn/test/optimizations/MovePermuteUpTests.cpp
+        src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
+        src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
+        src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
+        src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
+        src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
         src/armnn/test/OptionalTest.cpp
         src/armnn/test/ProfilerTests.cpp
         src/armnn/test/ProfilingEventTest.cpp
index b06403c..c0ad9c8 100644 (file)
@@ -17,45 +17,6 @@ using namespace armnn;
 
 namespace
 {
-template <typename LayerT>
-bool IsLayerOfType(const armnn::Layer* const layer)
-{
-    return (layer->GetType() == armnn::LayerEnumOf<LayerT>());
-}
-
-bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
-{
-    return (first == last);
-}
-
-/// Checks each unary function in Us evaluates true for each correspondent layer in the sequence [first, last).
-template <typename U, typename... Us>
-bool CheckSequence(const armnn::Graph::ConstIterator first,
-                   const armnn::Graph::ConstIterator last,
-                   U&& u,
-                   Us&&... us)
-{
-    return u(*first) && CheckSequence(std::next(first), last, us...);
-}
-
-template <typename LayerT>
-bool CheckRelatedLayers(armnn::Graph& graph, const std::list<std::string>& testRelatedLayers)
-{
-    for (auto& layer : graph)
-    {
-        if (layer->GetType() == armnn::LayerEnumOf<LayerT>())
-        {
-            auto& relatedLayers = layer->GetRelatedLayerNames();
-            if(!std::equal(relatedLayers.begin(), relatedLayers.end(),
-                           testRelatedLayers.begin(), testRelatedLayers.end()))
-            {
-                return false;
-            }
-        }
-    }
-
-    return true;
-}
 
 void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
 {
@@ -168,38 +129,6 @@ void CreateLSTMLayerHelper(Graph &graph, bool CifgEnabled)
 BOOST_AUTO_TEST_SUITE(Optimizer)
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(OptimizeInversePermutesTest)
-{
-    armnn::Graph graph;
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
-
-    graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
-
-    // Inserts two permutes, one the inverse of the other.
-    graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
-                                              armnn::PermuteDescriptor({0, 2, 3, 1}),
-                                              "perm0231");
-    graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
-                                              armnn::PermuteDescriptor({0, 3, 1, 2}),
-                                              "perm0312");
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInversePermutes()));
-
-    // The permutes are removed.
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
 BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
 {
     Graph graph;
@@ -222,421 +151,6 @@ BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
     BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
-{
-    const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
-    const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32);
-
-    armnn::Graph graph;
-
-    armnn::LayerBindingId inputId = 0;
-
-    armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
-
-    std::string permuteLayerName = "original_permute";
-
-    // Insert permute
-    head = graph.InsertNewLayer<armnn::PermuteLayer>(head->GetInputSlot(0),
-                                                     armnn::PermuteDescriptor({ 0, 2, 3, 1 }),
-                                                     permuteLayerName.c_str());
-
-    head->GetOutputHandler().SetTensorInfo(permuted);
-
-    // Inserts layers that don't care about data format.
-    head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0),
-                                                        armnn::ActivationDescriptor{}, "");
-    head->GetOutputHandler().SetTensorInfo(info);
-
-    head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
-    head->GetOutputHandler().SetTensorInfo(info);
-
-    // Inserts input for 2nd input of Addition.
-    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
-        ->GetOutputHandler().SetTensorInfo(info);
-
-    head = graph.InsertNewLayer<armnn::FakeQuantizationLayer>(head->GetInputSlot(0),
-                                                              armnn::FakeQuantizationDescriptor{}, "");
-    head->GetOutputHandler().SetTensorInfo(info);
-
-    head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
-    head->GetOutputHandler().SetTensorInfo(info);
-
-    head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
-    head->GetOutputHandler().SetTensorInfo(info);
-
-    head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
-    head->GetOutputHandler().SetTensorInfo(info);
-
-    // Inserts input for 2nd input of Multiplication.
-    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
-        ->GetOutputHandler().SetTensorInfo(info);
-
-    // Inserts input.
-    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
-        ->GetOutputHandler().SetTensorInfo(info);
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::MultiplicationLayer>,
-                             &IsLayerOfType<armnn::MemCopyLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::FakeQuantizationLayer>,
-                             &IsLayerOfType<armnn::AdditionLayer>,
-                             &IsLayerOfType<armnn::ActivationLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
-
-    // The permute is moved to the top. New permutes for layers with multiple inputs.
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::MultiplicationLayer>,
-                             &IsLayerOfType<armnn::MemCopyLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::FakeQuantizationLayer>,
-                             &IsLayerOfType<armnn::AdditionLayer>,
-                             &IsLayerOfType<armnn::ActivationLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    std::list<std::string> testRelatedLayers = { permuteLayerName };
-
-    BOOST_TEST(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
-}
-
-BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
-{
-    armnn::Graph graph;
-
-    std::string permuteLayerName = "permute";
-
-    const armnn::TensorInfo infoIn({ 1, 2, 3, 1 }, armnn::DataType::Float32);
-    const armnn::TensorInfo infoOut({ 1, 1, 2, 3 }, armnn::DataType::Float32);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
-
-    graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input")
-        ->GetOutputHandler().SetTensorInfo(infoIn);
-
-    // Inserts permute.
-    graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0),
-                                              armnn::PermuteDescriptor({ 0, 2, 3, 1 }), permuteLayerName.c_str())
-        ->GetOutputHandler().SetTensorInfo(infoOut);
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(PermuteAsReshape()));
-
-    // The permute is replaced by an equivalent reshape.
-
-    auto checkReshape = [&infoOut](const armnn::Layer* const layer) -> bool
-        {
-            const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
-            return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
-                   (reshapeLayer->GetParameters().m_TargetShape == infoOut.GetShape()) &&
-                   (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
-        };
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             checkReshape,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-
-    std::list<std::string> testRelatedLayers = { permuteLayerName };
-    BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
-}
-
-BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info0({ 1, 2, 3, 5 }, armnn::DataType::Float32);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
-    auto input = graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
-
-    input->GetOutputHandler().SetTensorInfo(info0);
-
-    {
-        // Inserts two reshapes.
-        const armnn::TensorInfo info1({1, 30, 1, 1}, armnn::DataType::Float32);
-        const armnn::TensorInfo info2({1, 2, 1, 15}, armnn::DataType::Float32);
-
-        std::string reshape1Name = "reshape1";
-        std::string reshape2Name = "reshape2";
-
-        auto reshape1 = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
-                                                                  armnn::ReshapeDescriptor{ info1.GetShape() },
-                                                                  reshape1Name.c_str());
-        auto reshape2 = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
-                                                                  armnn::ReshapeDescriptor{ info2.GetShape() },
-                                                                  reshape2Name.c_str());
-
-        reshape1->GetOutputHandler().SetTensorInfo(info1);
-        reshape2->GetOutputHandler().SetTensorInfo(info2);
-
-        BOOST_TEST(CheckSequence(graph.cbegin(),
-                                 graph.cend(),
-                                 &IsLayerOfType<armnn::InputLayer>,
-                                 &IsLayerOfType<armnn::ReshapeLayer>,
-                                 &IsLayerOfType<armnn::ReshapeLayer>,
-                                 &IsLayerOfType<armnn::OutputLayer>));
-
-        armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeConsecutiveReshapes()));
-
-        auto checkReshape = [&info2](const armnn::Layer* const layer) -> bool
-            {
-                const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
-                return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
-                    (reshapeLayer->GetParameters().m_TargetShape == info2.GetShape()) &&
-                    (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == info2.GetShape());
-            };
-
-        // The two reshapes are replaced by a single equivalent reshape.
-        BOOST_TEST(CheckSequence(graph.cbegin(),
-                                 graph.cend(),
-                                 &IsLayerOfType<armnn::InputLayer>,
-                                 checkReshape,
-                                 &IsLayerOfType<armnn::OutputLayer>));
-
-        // Check the new reshape layer has the other two reshapes as related layers
-        std::list<std::string> testRelatedLayers = { reshape2Name, reshape1Name };
-
-        BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
-    }
-
-    {
-        // Inserts a reshape to the input shape.
-        auto reshapeToIn = graph.InsertNewLayer<armnn::ReshapeLayer>(output->GetInputSlot(0),
-                                                                     armnn::ReshapeDescriptor{ info0.GetShape() },
-                                                                     "reshapeToIn");
-
-        reshapeToIn->GetOutputHandler().SetTensorInfo(info0);
-
-        armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeConsecutiveReshapes()));
-
-        // The two reshapes are removed.
-        BOOST_TEST(CheckSequence(graph.cbegin(),
-                                 graph.cend(),
-                                 &IsLayerOfType<armnn::InputLayer>,
-                                 &IsLayerOfType<armnn::OutputLayer>));
-    }
-}
-
-BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
-{
-    armnn::Graph graph;
-
-    armnn::LayerBindingId outputId = 0;
-
-    const armnn::TensorInfo info({ 1, 2, 3, 5 }, armnn::DataType::Float32);
-    const armnn::TensorInfo permuted({ 1, 5, 2, 3 }, armnn::DataType::Float32);
-
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
-
-    // Inserts equal permutes, equal reshapes and something else.
-    const armnn::PermuteDescriptor permDesc({ 0, 2, 3, 1 });
-    const armnn::ReshapeDescriptor reshapeDesc{ { 1, 3, 1, 5 } };
-
-    armnn::Layer* layer;
-
-    layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
-    layer->GetOutputSlot().SetTensorInfo(permuted);
-    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
-    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
-
-    layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
-    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
-    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
-
-    layer = graph.AddLayer<armnn::FloorLayer>("");
-    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
-    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
-
-    layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
-    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
-    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
-
-    layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
-    layer->GetOutputSlot().SetTensorInfo(permuted);
-    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
-    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::ReshapeLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::ReshapeLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(SquashEqualPermuteSiblings(),
-                                                            SquashEqualReshapeSiblings()));
-
-    // The permutes and reshapes are squashed.
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::PermuteLayer>,
-                             &IsLayerOfType<armnn::ReshapeLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info({ 1,1,1,2 }, armnn::DataType::Float32);
-
-    // Create the half precision input data
-    unsigned int dims[] = { 4,1,1,1 };
-    std::vector<float> convWeightsData{1.f, 2.f, 3.f, 4.f};
-    std::vector<uint16_t> halfWeights(4);
-    armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(),
-                                                           convWeightsData.size(),
-                                                           halfWeights.data());
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16), halfWeights);
-
-    //Create the simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
-
-    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
-    fc->GetOutputSlot().SetTensorInfo(info);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    //Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    //Test the tensor info is correct.
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
-
-    //Test the tensor info is correct.
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
-    // Now test the data matches float32 data
-    float* data = fc->m_Weight->GetTensor<float>();
-    BOOST_CHECK(1.0f == data[0]);
-    BOOST_CHECK(2.0f == data[1]);
-    BOOST_CHECK(3.0f == data[2]);
-    BOOST_CHECK(4.0f == data[3]);
-}
-
-BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float16);
-
-    // Create const tensor from fp32 data
-    unsigned int dims[] = { 4, 1, 1, 1 };
-    std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
-    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
-
-    // Create simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
-
-    auto fc = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
-    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
-    fc->GetOutputSlot().SetTensorInfo(info);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
-    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    // Check tensor data type before conversion
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
-
-    // Check tensor data type after conversion
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
-
-    // Check whether data matches expected fp16 data
-    Half* data = fc->m_Weight->GetTensor<Half>();
-    BOOST_CHECK(data[0] == Half(1.0f));
-    BOOST_CHECK(data[1] == Half(2.0f));
-    BOOST_CHECK(data[2] == Half(3.0f));
-    BOOST_CHECK(data[3] == Half(4.0f));
-}
-
-BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
-{
-    armnn::Graph graph;
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
-
-    graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
-
-    // Fp32ToFp16 conversion followed by an inverse Fp16ToFp32 conversion
-    graph.InsertNewLayer<armnn::ConvertFp32ToFp16Layer>(output->GetInputSlot(0), "convert1");
-    graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(output->GetInputSlot(0), "convert2");
-
-    graph.InsertNewLayer<armnn::Convolution2dLayer>(output->GetInputSlot(0), Convolution2dDescriptor(), "conv");
-
-    // Fp16ToFp32 conversion followed by an inverse Fp32ToFp16 conversion
-    graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(output->GetInputSlot(0), "convert3");
-    graph.InsertNewLayer<armnn::ConvertFp32ToFp16Layer>(output->GetInputSlot(0), "convert4");
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
-                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                             &IsLayerOfType<armnn::Convolution2dLayer>,
-                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInverseConversionsFp16(),
-                                                           OptimizeInverseConversionsFp32()));
-
-    // Check that all consecutive inverse conversions are removed
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::Convolution2dLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
 BOOST_AUTO_TEST_CASE(InsertConvertersTest)
 {
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
@@ -728,79 +242,7 @@ BOOST_AUTO_TEST_CASE(InsertConvertersTest)
                              &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo infoFP32({ 2,2,1,3 }, armnn::DataType::Float32);
-
-    // Create the simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
-    floor->GetOutputSlot().SetTensorInfo(infoFP32);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(floor->GetInputSlot(0));
-    floor->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
-
-BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
-{
-    armnn::Graph graph;
-
-    const armnn::TensorInfo info({ 2,2,1,3 }, armnn::DataType::Float32);
-
-    // Create the simple test network
-    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
-    input->GetOutputSlot().SetTensorInfo(info);
 
-    auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
-    floor->GetOutputSlot().SetTensorInfo(info);
-
-    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
-
-    // Connect up the layers
-    input->GetOutputSlot().Connect(floor->GetInputSlot(0));
-    floor->GetOutputSlot().Connect(output->GetInputSlot(0));
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-
-    // Run the optimizer
-    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer()));
-
-    BOOST_TEST(CheckSequence(graph.cbegin(),
-                             graph.cend(),
-                             &IsLayerOfType<armnn::InputLayer>,
-                             &IsLayerOfType<armnn::DebugLayer>,
-                             &IsLayerOfType<armnn::FloorLayer>,
-                             &IsLayerOfType<armnn::DebugLayer>,
-                             &IsLayerOfType<armnn::OutputLayer>));
-}
 
 void CreateConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
                               const unsigned int* weightsShape, const unsigned int* outputShape,
index 9129d91..9b1a3bf 100644 (file)
@@ -6,6 +6,44 @@
 #pragma once
 
 #include <armnn/INetwork.hpp>
+#include <Graph.hpp>
 
 void Connect(armnn::IConnectableLayer* from, armnn::IConnectableLayer* to, const armnn::TensorInfo& tensorInfo,
              unsigned int fromIndex = 0, unsigned int toIndex = 0);
+
+template <typename LayerT>
+bool IsLayerOfType(const armnn::Layer* const layer)
+{
+    return (layer->GetType() == armnn::LayerEnumOf<LayerT>());
+}
+
+inline bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last)
+{
+    return (first == last);
+}
+
+/// Checks each unary function in Us evaluates true for each correspondent layer in the sequence [first, last).
+template <typename U, typename... Us>
+bool CheckSequence(const armnn::Graph::ConstIterator first, const armnn::Graph::ConstIterator last, U&& u, Us&&... us)
+{
+    return u(*first) && CheckSequence(std::next(first), last, us...);
+}
+
+template <typename LayerT>
+bool CheckRelatedLayers(armnn::Graph& graph, const std::list<std::string>& testRelatedLayers)
+{
+    for (auto& layer : graph)
+    {
+        if (layer->GetType() == armnn::LayerEnumOf<LayerT>())
+        {
+            auto& relatedLayers = layer->GetRelatedLayerNames();
+            if (!std::equal(relatedLayers.begin(), relatedLayers.end(), testRelatedLayers.begin(),
+                            testRelatedLayers.end()))
+            {
+                return false;
+            }
+        }
+    }
+
+    return true;
+}
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
new file mode 100644 (file)
index 0000000..b40bd2d
--- /dev/null
@@ -0,0 +1,60 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+#include <Half.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
+{
+    armnn::Graph graph;
+
+    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float16);
+
+    // Create const tensor from fp32 data
+    unsigned int dims[] = { 4, 1, 1, 1 };
+    std::vector<float> floatWeights{ 1.0f, 2.0f, 3.0f, 4.0f };
+    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float32), floatWeights);
+
+    // Create simple test network
+    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(info);
+
+    auto fc      = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
+    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    fc->GetOutputSlot().SetTensorInfo(info);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
+
+    // Connect up the layers
+    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    // Check tensor data type before conversion
+    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+
+    // Run the optimizer
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
+
+    // Check tensor data type after conversion
+    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+
+    // Check whether data matches expected fp16 data
+    Half* data = fc->m_Weight->GetTensor<Half>();
+    BOOST_CHECK(data[0] == Half(1.0f));
+    BOOST_CHECK(data[1] == Half(2.0f));
+    BOOST_CHECK(data[2] == Half(3.0f));
+    BOOST_CHECK(data[3] == Half(4.0f));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
new file mode 100644 (file)
index 0000000..aca58ad
--- /dev/null
@@ -0,0 +1,60 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
+{
+    armnn::Graph graph;
+
+    const armnn::TensorInfo info({ 1, 1, 1, 2 }, armnn::DataType::Float32);
+
+    // Create the half precision input data
+    unsigned int dims[] = { 4, 1, 1, 1 };
+    std::vector<float> convWeightsData{ 1.f, 2.f, 3.f, 4.f };
+    std::vector<uint16_t> halfWeights(4);
+    armnnUtils::FloatingPointConverter::ConvertFloat32To16(convWeightsData.data(), convWeightsData.size(),
+                                                           halfWeights.data());
+    armnn::ConstTensor weights(armnn::TensorInfo(4, dims, armnn::DataType::Float16), halfWeights);
+
+    // Create the simple test network
+    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(info);
+
+    auto fc      = graph.AddLayer<armnn::FullyConnectedLayer>(armnn::FullyConnectedDescriptor(), "fc");
+    fc->m_Weight = std::make_unique<armnn::ScopedCpuTensorHandle>(weights);
+    fc->GetOutputSlot().SetTensorInfo(info);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
+
+    // Connect up the layers
+    input->GetOutputSlot().Connect(fc->GetInputSlot(0));
+    fc->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    // Check tensor data type before conversion
+    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+
+    // Run the optimizer
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
+
+    // Check tensor data type after conversion
+    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+
+    // Now test the data matches float32 data
+    float* data = fc->m_Weight->GetTensor<float>();
+    BOOST_CHECK(1.0f == data[0]);
+    BOOST_CHECK(2.0f == data[1]);
+    BOOST_CHECK(3.0f == data[2]);
+    BOOST_CHECK(4.0f == data[3]);
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
new file mode 100644 (file)
index 0000000..0e47b3f
--- /dev/null
@@ -0,0 +1,45 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
+{
+    armnn::Graph graph;
+
+    const armnn::TensorInfo infoFP32({ 2, 2, 1, 3 }, armnn::DataType::Float32);
+
+    // Create the simple test network
+    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(infoFP32);
+
+    auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
+    floor->GetOutputSlot().SetTensorInfo(infoFP32);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
+
+    // Connect up the layers
+    input->GetOutputSlot().Connect(floor->GetInputSlot(0));
+    floor->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    // Run the optimizer
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::FloorLayer>,
+                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
new file mode 100644 (file)
index 0000000..57cf463
--- /dev/null
@@ -0,0 +1,45 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
+{
+    armnn::Graph graph;
+
+    const armnn::TensorInfo info({ 2, 2, 1, 3 }, armnn::DataType::Float32);
+
+    // Create the simple test network
+    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(info);
+
+    auto floor = graph.AddLayer<armnn::FloorLayer>("floor");
+    floor->GetOutputSlot().SetTensorInfo(info);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(1, "output");
+
+    // Connect up the layers
+    input->GetOutputSlot().Connect(floor->GetInputSlot(0));
+    floor->GetOutputSlot().Connect(output->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    // Run the optimizer
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer()));
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::FloorLayer>,
+                             &IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
new file mode 100644 (file)
index 0000000..2c297d6
--- /dev/null
@@ -0,0 +1,92 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
+{
+    const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+    const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32);
+
+    armnn::Graph graph;
+
+    armnn::LayerBindingId inputId = 0;
+
+    armnn::Layer* head = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    std::string permuteLayerName = "original_permute";
+
+    // Insert permute
+    head = graph.InsertNewLayer<armnn::PermuteLayer>(head->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 2, 3, 1 }),
+                                                     permuteLayerName.c_str());
+
+    head->GetOutputHandler().SetTensorInfo(permuted);
+
+    // Inserts layers that don't care about data format.
+    head = graph.InsertNewLayer<armnn::ActivationLayer>(head->GetInputSlot(0), armnn::ActivationDescriptor{}, "");
+    head->GetOutputHandler().SetTensorInfo(info);
+
+    head = graph.InsertNewLayer<armnn::AdditionLayer>(head->GetInputSlot(0), "");
+    head->GetOutputHandler().SetTensorInfo(info);
+
+    // Inserts input for 2nd input of Addition.
+    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+        ->GetOutputHandler()
+        .SetTensorInfo(info);
+
+    head = graph.InsertNewLayer<armnn::FakeQuantizationLayer>(head->GetInputSlot(0),
+                                                              armnn::FakeQuantizationDescriptor{}, "");
+    head->GetOutputHandler().SetTensorInfo(info);
+
+    head = graph.InsertNewLayer<armnn::FloorLayer>(head->GetInputSlot(0), "");
+    head->GetOutputHandler().SetTensorInfo(info);
+
+    head = graph.InsertNewLayer<armnn::MemCopyLayer>(head->GetInputSlot(0), "");
+    head->GetOutputHandler().SetTensorInfo(info);
+
+    head = graph.InsertNewLayer<armnn::MultiplicationLayer>(head->GetInputSlot(0), "");
+    head->GetOutputHandler().SetTensorInfo(info);
+
+    // Inserts input for 2nd input of Multiplication.
+    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(1), inputId++, "")
+        ->GetOutputHandler()
+        .SetTensorInfo(info);
+
+    // Inserts input.
+    graph.InsertNewLayer<armnn::InputLayer>(head->GetInputSlot(0), inputId++, "")
+        ->GetOutputHandler()
+        .SetTensorInfo(info);
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
+                             &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
+                             &IsLayerOfType<armnn::AdditionLayer>, &IsLayerOfType<armnn::ActivationLayer>,
+                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
+
+    // The permute is moved to the top. New permutes for layers with multiple inputs.
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
+                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
+                             &IsLayerOfType<armnn::MemCopyLayer>, &IsLayerOfType<armnn::FloorLayer>,
+                             &IsLayerOfType<armnn::FakeQuantizationLayer>, &IsLayerOfType<armnn::AdditionLayer>,
+                             &IsLayerOfType<armnn::ActivationLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    std::list<std::string> testRelatedLayers = { permuteLayerName };
+
+    BOOST_TEST(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
new file mode 100644 (file)
index 0000000..d16b8f7
--- /dev/null
@@ -0,0 +1,80 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
+{
+    armnn::Graph graph;
+
+    const armnn::TensorInfo info0({ 1, 2, 3, 5 }, armnn::DataType::Float32);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+    auto input  = graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
+
+    input->GetOutputHandler().SetTensorInfo(info0);
+
+    {
+        // Inserts two reshapes.
+        const armnn::TensorInfo info1({ 1, 30, 1, 1 }, armnn::DataType::Float32);
+        const armnn::TensorInfo info2({ 1, 2, 1, 15 }, armnn::DataType::Float32);
+
+        std::string reshape1Name = "reshape1";
+        std::string reshape2Name = "reshape2";
+
+        auto reshape1 = graph.InsertNewLayer<armnn::ReshapeLayer>(
+            output->GetInputSlot(0), armnn::ReshapeDescriptor{ info1.GetShape() }, reshape1Name.c_str());
+        auto reshape2 = graph.InsertNewLayer<armnn::ReshapeLayer>(
+            output->GetInputSlot(0), armnn::ReshapeDescriptor{ info2.GetShape() }, reshape2Name.c_str());
+
+        reshape1->GetOutputHandler().SetTensorInfo(info1);
+        reshape2->GetOutputHandler().SetTensorInfo(info2);
+
+        BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                                 &IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
+                                 &IsLayerOfType<armnn::OutputLayer>));
+
+        armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeConsecutiveReshapes()));
+
+        auto checkReshape = [&info2](const armnn::Layer* const layer) -> bool {
+            const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
+            return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
+                   (reshapeLayer->GetParameters().m_TargetShape == info2.GetShape()) &&
+                   (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == info2.GetShape());
+        };
+
+        // The two reshapes are replaced by a single equivalent reshape.
+        BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+                                 &IsLayerOfType<armnn::OutputLayer>));
+
+        // Check the new reshape layer has the other two reshapes as related layers
+        std::list<std::string> testRelatedLayers = { reshape2Name, reshape1Name };
+
+        BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+    }
+
+    {
+        // Inserts a reshape to the input shape.
+        auto reshapeToIn = graph.InsertNewLayer<armnn::ReshapeLayer>(
+            output->GetInputSlot(0), armnn::ReshapeDescriptor{ info0.GetShape() }, "reshapeToIn");
+
+        reshapeToIn->GetOutputHandler().SetTensorInfo(info0);
+
+        armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeConsecutiveReshapes()));
+
+        // The two reshapes are removed.
+        BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                                 &IsLayerOfType<armnn::OutputLayer>));
+    }
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
new file mode 100644 (file)
index 0000000..130e498
--- /dev/null
@@ -0,0 +1,49 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
+{
+    armnn::Graph graph;
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
+
+    // Fp32ToFp16 conversion followed by an inverse Fp16ToFp32 conversion
+    graph.InsertNewLayer<armnn::ConvertFp32ToFp16Layer>(output->GetInputSlot(0), "convert1");
+    graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(output->GetInputSlot(0), "convert2");
+
+    graph.InsertNewLayer<armnn::Convolution2dLayer>(output->GetInputSlot(0), Convolution2dDescriptor(), "conv");
+
+    // Fp16ToFp32 conversion followed by an inverse Fp32ToFp16 conversion
+    graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(output->GetInputSlot(0), "convert3");
+    graph.InsertNewLayer<armnn::ConvertFp32ToFp16Layer>(output->GetInputSlot(0), "convert4");
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
+                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
+                             &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
+                             &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    armnn::Optimizer::Pass(
+        graph, armnn::MakeOptimizations(OptimizeInverseConversionsFp16(), OptimizeInverseConversionsFp32()));
+
+    // Check that all consecutive inverse conversions are removed
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
new file mode 100644 (file)
index 0000000..dcf9559
--- /dev/null
@@ -0,0 +1,42 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(OptimizeInversePermutesTest)
+{
+    armnn::Graph graph;
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input");
+
+    // Inserts two permutes, one the inverse of the other.
+    graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 2, 3, 1 }),
+                                              "perm0231");
+    graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 3, 1, 2 }),
+                                              "perm0312");
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
+
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInversePermutes()));
+
+    // The permutes are removed.
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
new file mode 100644 (file)
index 0000000..b44331c
--- /dev/null
@@ -0,0 +1,60 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
+{
+    armnn::Graph graph;
+
+    std::string permuteLayerName = "permute";
+
+    const armnn::TensorInfo infoIn({ 1, 2, 3, 1 }, armnn::DataType::Float32);
+    const armnn::TensorInfo infoOut({ 1, 1, 2, 3 }, armnn::DataType::Float32);
+
+    auto output = graph.AddLayer<armnn::OutputLayer>(0, "output");
+
+    graph.InsertNewLayer<armnn::InputLayer>(output->GetInputSlot(0), 0, "input")
+        ->GetOutputHandler()
+        .SetTensorInfo(infoIn);
+
+    // Inserts permute.
+    graph
+        .InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 2, 3, 1 }),
+                                             permuteLayerName.c_str())
+        ->GetOutputHandler()
+        .SetTensorInfo(infoOut);
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(PermuteAsReshape()));
+
+    // The permute is replaced by an equivalent reshape.
+
+    auto checkReshape = [&infoOut](const armnn::Layer* const layer) -> bool {
+        const auto reshapeLayer = static_cast<const armnn::ReshapeLayer*>(layer);
+        return IsLayerOfType<armnn::ReshapeLayer>(layer) &&
+               (reshapeLayer->GetParameters().m_TargetShape == infoOut.GetShape()) &&
+               (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
+    };
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+                             &IsLayerOfType<armnn::OutputLayer>));
+
+    std::list<std::string> testRelatedLayers = { permuteLayerName };
+    BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+}
+
+BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
new file mode 100644 (file)
index 0000000..3fa49ec
--- /dev/null
@@ -0,0 +1,74 @@
+//
+// Copyright © 2017 Arm Ltd. All rights reserved.
+// SPDX-License-Identifier: MIT
+//
+
+#include "../TestUtils.hpp"
+
+#include <Optimizer.hpp>
+
+#include <boost/test/unit_test.hpp>
+
+using namespace armnn;
+
+BOOST_AUTO_TEST_SUITE(Optimizer)
+using namespace armnn::optimizations;
+
+BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
+{
+    armnn::Graph graph;
+
+    armnn::LayerBindingId outputId = 0;
+
+    const armnn::TensorInfo info({ 1, 2, 3, 5 }, armnn::DataType::Float32);
+    const armnn::TensorInfo permuted({ 1, 5, 2, 3 }, armnn::DataType::Float32);
+
+    auto input = graph.AddLayer<armnn::InputLayer>(0, "input");
+    input->GetOutputSlot().SetTensorInfo(info);
+
+    // Inserts equal permutes, equal reshapes and something else.
+    const armnn::PermuteDescriptor permDesc({ 0, 2, 3, 1 });
+    const armnn::ReshapeDescriptor reshapeDesc{ { 1, 3, 1, 5 } };
+
+    armnn::Layer* layer;
+
+    layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
+    layer->GetOutputSlot().SetTensorInfo(permuted);
+    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+    layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
+    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+    layer = graph.AddLayer<armnn::FloorLayer>("");
+    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+    layer = graph.AddLayer<armnn::ReshapeLayer>(reshapeDesc, "");
+    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+    layer = graph.AddLayer<armnn::PermuteLayer>(permDesc, "");
+    layer->GetOutputSlot().SetTensorInfo(permuted);
+    layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
+    input->GetOutputSlot().Connect(layer->GetInputSlot(0));
+
+    BOOST_TEST(CheckSequence(
+        graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::PermuteLayer>,
+        &IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
+        &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
+        &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>));
+
+    armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(SquashEqualPermuteSiblings(), SquashEqualReshapeSiblings()));
+
+    // The permutes and reshapes are squashed.
+
+    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+                             &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
+                             &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
+                             &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>));
+}
+
+BOOST_AUTO_TEST_SUITE_END()