Update ARM CL to v18.05 (#1925)
author Hyeongseok Oh / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <hseok82.oh@samsung.com>
Wed, 11 Jul 2018 01:51:33 +0000 (10:51 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Wed, 11 Jul 2018 01:51:33 +0000 (10:51 +0900)
Patch to migrate to ARM CL v18.05
- Use the arm_compute/graph/frontend API to build the graph data structure
- Update the ACL submodule to v18.05

Signed-off-by: Hyeongseok Oh <hseok82.oh@samsung.com>
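
For reference, a minimal sketch of the v18.05 frontend API that this patch
migrates to. The DummyAccessor type and the "EXAMPLE" stream name are
illustrative only, not part of this patch; the real accessors live in
Benchmark.h and io_accessor.h below.

#include "arm_compute/graph.h"

#include <memory>

using namespace arm_compute::graph::frontend;

// Hypothetical no-op accessor implementing the ITensorAccessor interface
struct DummyAccessor final : public arm_compute::graph::ITensorAccessor
{
  bool access_tensor(arm_compute::ITensor &) override { return true; }
};

void build_and_run()
{
  // v18.05 replaces graph::Graph with graph::frontend::Stream,
  // which is constructed from a graph id and a name
  Stream graph{0, "EXAMPLE"};

  // Tensor(...) endpoints become InputLayer/OutputLayer nodes, and
  // TargetHint::OPENCL/NEON become Target::CL/NEON
  graph << Target::NEON
        << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
                      std::unique_ptr<DummyAccessor>(new DummyAccessor()))
        << SoftmaxLayer()
        << OutputLayer(std::unique_ptr<DummyAccessor>(new DummyAccessor()));

  // Graphs are now finalized explicitly before running
  graph.finalize(Target::NEON, GraphConfig{});
  graph.run();
}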
benchmark/acl/Benchmark.cpp
benchmark/acl/Benchmark.h
benchmark/acl/benchmark_googlenet.cpp
benchmark/acl/benchmark_inception_v3.cpp
benchmark/acl/benchmark_mobilenet.cpp
contrib/bindacl/src/nnapi_acl.cc
contrib/convacl/src/io_accessor.cc
contrib/convacl/src/nnapi_acl_conv.cc
contrib/kerneltesting/conv2d/nnfw_conv2d_test.cpp
externals/acl

diff --git a/benchmark/acl/Benchmark.cpp b/benchmark/acl/Benchmark.cpp
index 01a9491..fe10ac1 100644
@@ -23,7 +23,7 @@ uint32_t Count::value(void) const { return _value; }
 
 using namespace boost::accumulators;
 
-void run_benchmark(arm_compute::graph::Graph &graph)
+void run_benchmark(arm_compute::graph::frontend::Stream &graph)
 {
   // NOTE Here the number of warming-up iterations is hardcoded
   // TODO Decide the number of warming-up iterations appropriately
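
The body of run_benchmark is unchanged by this patch; only the parameter type
moves to the frontend Stream. For context, a sketch of a loop of this shape
(the iteration counts and accumulator choice are assumptions, not the actual
body):

#include "arm_compute/graph.h"

#include <boost/accumulators/accumulators.hpp>
#include <boost/accumulators/statistics/mean.hpp>
#include <boost/accumulators/statistics/stats.hpp>

#include <chrono>
#include <iostream>

using namespace boost::accumulators;

void run_benchmark_sketch(arm_compute::graph::frontend::Stream &graph)
{
  // Warm-up iterations (hardcoded, as the NOTE above describes)
  for (int n = 0; n < 3; ++n) graph.run();

  // Timed iterations, averaged with a boost accumulator
  accumulator_set<double, stats<tag::mean>> acc;
  for (int n = 0; n < 10; ++n)
  {
    auto begin = std::chrono::steady_clock::now();
    graph.run();
    auto end = std::chrono::steady_clock::now();
    acc(std::chrono::duration_cast<std::chrono::microseconds>(end - begin).count());
  }
  std::cout << "Mean execution time: " << mean(acc) << " us" << std::endl;
}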
diff --git a/benchmark/acl/Benchmark.h b/benchmark/acl/Benchmark.h
index a39cca1..9ce1693 100644
@@ -2,7 +2,8 @@
 #define __ACL_BENCHMARK_H__
 
 #include "arm_compute/graph/ITensorAccessor.h"
-#include "arm_compute/graph/Graph.h"
+#include "arm_compute/graph.h"
+#include "arm_compute/core/CL/OpenCL.h"
 
 struct InputAccessor final : public arm_compute::graph::ITensorAccessor
 {
@@ -47,19 +48,19 @@ private:
   uint32_t _value;
 };
 
-inline arm_compute::graph::TargetHint set_target_hint(int target)
+inline arm_compute::graph::Target set_target_hint(int target)
 {
-    if(target == 1 && arm_compute::graph::Graph::opencl_is_available())
+    if(target == 1 && arm_compute::opencl_is_available())
     {
         // If the target is OpenCL, check whether OpenCL is available and initialize the scheduler
-        return arm_compute::graph::TargetHint::OPENCL;
+        return arm_compute::graph::Target::CL;
     }
     else
     {
-        return arm_compute::graph::TargetHint::NEON;
+        return arm_compute::graph::Target::NEON;
     }
 }
 
-void run_benchmark(arm_compute::graph::Graph &graph);
+void run_benchmark(arm_compute::graph::frontend::Stream &graph);
 
 #endif
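
Example use of the two helpers above, mirroring the benchmark mains updated
below (a sketch; the "EXAMPLE" stream name and the elided layer list are
placeholders):

#include "Benchmark.h"

#include <cstdlib>

int main(int argc, const char **argv)
{
  // 0 selects NEON (the default); 1 selects CL, falling back to NEON when
  // OpenCL is unavailable, since set_target_hint checks
  // arm_compute::opencl_is_available()
  arm_compute::graph::Target target_hint =
      set_target_hint(argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0);

  arm_compute::graph::frontend::Stream graph{0, "EXAMPLE"};
  graph << target_hint; // << InputLayer(...) << ... << OutputLayer(...)

  run_benchmark(graph); // runs the graph and reports timing statistics
  return 0;
}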
diff --git a/benchmark/acl/benchmark_googlenet.cpp b/benchmark/acl/benchmark_googlenet.cpp
index 59a034d..aa949cf 100644
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph.h"
 
 #include "Benchmark.h"
 
 #include <cstdlib>
 #include <tuple>
 
-using namespace arm_compute::graph;
+using namespace arm_compute::graph::frontend;
 
 namespace
 {
@@ -40,7 +38,8 @@ namespace
       std::tuple<unsigned int, unsigned int> c_filters,
       unsigned int d_filt)
   {
-    SubGraph    i_a;
+    Stream graph{0, "BENCHMARK_GOOGLENET"};
+    SubStream    i_a(graph);
     i_a << ConvolutionLayer(
         1U, 1U, a_filt,
         get_accessor<InputAccessor>(),
@@ -48,7 +47,7 @@ namespace
         PadStrideInfo(1, 1, 0, 0))
       << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-    SubGraph i_b;
+    SubStream i_b(graph);
     i_b << ConvolutionLayer(
         1U, 1U, std::get<0>(b_filters),
         get_accessor<InputAccessor>(),
@@ -62,7 +61,7 @@ namespace
           PadStrideInfo(1, 1, 1, 1))
       << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-    SubGraph i_c;
+    SubStream i_c(graph);
     i_c << ConvolutionLayer(
         1U, 1U, std::get<0>(c_filters),
         get_accessor<InputAccessor>(),
@@ -76,7 +75,7 @@ namespace
           PadStrideInfo(1, 1, 2, 2))
       << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-    SubGraph i_d;
+    SubStream i_d(graph);
     i_d << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL)))
       << ConvolutionLayer(
           1U, 1U, d_filt,
@@ -105,8 +104,8 @@ void main_graph_googlenet(int argc, const char **argv)
   constexpr float mean_b = 104.01f; /* Mean value to subtract from blue channel */
 
   // Set target. 0 (NEON), 1 (OpenCL). By default it is NEON
-  TargetHint            target_hint      = set_target_hint(argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0);
-  ConvolutionMethodHint convolution_hint = target_hint == TargetHint::NEON ? ConvolutionMethodHint::GEMM : ConvolutionMethodHint::DIRECT;
+  Target            target_hint      = set_target_hint(argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0);
+  ConvolutionMethod convolution_hint = target_hint == Target::NEON ? ConvolutionMethod::GEMM : ConvolutionMethod::DIRECT;
 
   // Parse arguments
   if(argc < 2)
@@ -140,10 +139,10 @@ void main_graph_googlenet(int argc, const char **argv)
     label     = argv[4];
   }
 
-  Graph graph;
+  Stream graph{0, "BENCHMARK_GOOGLENET"};
 
   graph << target_hint
-    << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
+    << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
         get_accessor<InputAccessor>())
     << ConvolutionLayer(
         7U, 7U, 64U,
@@ -185,7 +184,7 @@ void main_graph_googlenet(int argc, const char **argv)
         get_accessor<InputAccessor>(),
         get_accessor<InputAccessor>())
     << SoftmaxLayer()
-    << Tensor(get_accessor<OutputAccessor>());
+    << OutputLayer(get_accessor<OutputAccessor>());
 
   run_benchmark(graph);
 }
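
The recurring change in this file: SubGraph branches become SubStreams, which
must be forked from an existing Stream. A reduced sketch of the inception
module pattern, with placeholder filter counts and the get_accessor helper
this file already uses:

BranchLayer get_two_branch_node(Stream &graph)
{
  // v18.05: a branch is a SubStream constructed from the parent stream
  SubStream i_a(graph);
  i_a << ConvolutionLayer(
        1U, 1U, 64U,
        get_accessor<InputAccessor>(),
        get_accessor<InputAccessor>(),
        PadStrideInfo(1, 1, 0, 0))
      << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));

  SubStream i_b(graph);
  i_b << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3,
                                       PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL)));

  // Branches are merged back into the parent exactly as before
  return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b));
}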
diff --git a/benchmark/acl/benchmark_inception_v3.cpp b/benchmark/acl/benchmark_inception_v3.cpp
index 3e5df2f..5a31d71 100644
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
-#include "arm_compute/graph/SubGraph.h"
+#include "arm_compute/graph.h"
 
 #include "Benchmark.h"
 
 #include <cstdlib>
 #include <tuple>
 
-using namespace arm_compute::graph;
+using namespace arm_compute::graph::frontend;
 
 inline std::unique_ptr<arm_compute::graph::ITensorAccessor> get_input_accessor(void)
 {
@@ -68,7 +66,7 @@ public:
 
         // Set target. 0 (NEON), 1 (OpenCL), 2 (OpenCL with Tuner). By default it is NEON
         const int  int_target_hint = argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0;
-        TargetHint target_hint     = set_target_hint(int_target_hint);
+        Target target_hint     = set_target_hint(int_target_hint);
 
         // Parse arguments
         if(argc < 2)
@@ -102,7 +100,7 @@ public:
             label     = argv[4];
         }
 
-        graph << target_hint << Tensor(TensorInfo(TensorShape(299U, 299U, 3U, 1U), 1, DataType::F32),
+        graph << target_hint << InputLayer(TensorDescriptor(TensorShape(299U, 299U, 3U, 1U), DataType::F32),
                                        get_input_accessor())
 
               << ConvolutionLayer(3U, 3U, 32U,
@@ -114,7 +112,8 @@ public:
                                                               "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_1a_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << ConvolutionLayer(3U, 3U, 32U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_weights.npy"),
@@ -125,7 +124,8 @@ public:
                                                               "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_2a_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << ConvolutionLayer(3U, 3U, 64U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_weights.npy"),
@@ -136,7 +136,8 @@ public:
                                                               "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_2b_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
 
@@ -149,7 +150,8 @@ public:
                                                               "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_3b_1x1_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << ConvolutionLayer(3U, 3U, 192U,
                                   get_weights_accessor(data_path, "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_weights.npy"),
@@ -160,7 +162,8 @@ public:
                                                               "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_moving_variance.npy"),
                                          get_random_accessor(1.f, 1.f), get_weights_accessor(data_path,
                                                                                              "/cnn_data/inceptionv3_model/Conv2d_4a_3x3_BatchNorm_beta.npy"),
-                                         0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                                         0.001f)
+              << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
 
               << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
 
@@ -197,10 +200,12 @@ public:
                                                        "/cnn_data/inceptionv3_model/Logits_Conv2d_1c_1x1_biases.npy"),
                                   PadStrideInfo(1, 1, 0, 0))
               << ReshapeLayer(TensorShape(1001U)) << SoftmaxLayer()
-              << Tensor(get_output_accessor());
+              << OutputLayer(get_output_accessor());
 
-        // In order to enable the OpenCL tuner, graph_init() has to be called only when all nodes have been instantiated
-        graph.graph_init(int_target_hint == 2);
+        // Finalize graph
+        GraphConfig config;
+        config.use_tuner = (int_target_hint == 2);
+        graph.finalize(target_hint, config);
     }
 
     void do_run()
@@ -209,7 +214,7 @@ public:
     }
 
 private:
-    Graph graph{};
+    Stream graph{0, "BENCHMARK_INCEPTION_V3"};
 
 private:
     BranchLayer get_inception_node_A(const std::string &data_path, std::string &&param_path,
@@ -230,7 +235,7 @@ private:
             conv_id1 = "_1_0c_";
         }
 
-        SubGraph i_a;
+        SubStream i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -241,9 +246,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_weights.npy"),
@@ -254,7 +260,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id0 + "1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 5U, 5U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_weights.npy"),
@@ -265,9 +272,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv" + conv_id1 + "5x5_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, std::get<0>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -278,7 +286,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"),
@@ -289,7 +298,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<2>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_weights.npy"),
@@ -300,9 +310,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(
                 1U, 1U, d_filt,
@@ -314,7 +325,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
     }
@@ -324,7 +336,7 @@ private:
                                      std::tuple<unsigned int, unsigned int, unsigned int> b_filters)
     {
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream    i_a(graph);
         i_a << ConvolutionLayer(
                 3U, 3U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_weights.npy"),
@@ -335,9 +347,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -348,7 +361,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_weights.npy"),
@@ -359,7 +373,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<2>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_weights.npy"),
@@ -370,9 +385,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
 
@@ -386,7 +402,7 @@ private:
                                      unsigned int d_filt)
     {
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream    i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -397,9 +413,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -410,7 +427,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"),
@@ -421,7 +439,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<2>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"),
@@ -432,9 +451,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, std::get<0>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -445,7 +465,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<1>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_weights.npy"),
@@ -456,7 +477,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<2>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_weights.npy"),
@@ -467,7 +489,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<3>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_weights.npy"),
@@ -478,7 +501,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<4>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_weights.npy"),
@@ -489,9 +513,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0e_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(
                 1U, 1U, d_filt,
@@ -503,7 +528,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
     }
@@ -513,7 +539,7 @@ private:
                                      std::tuple<unsigned int, unsigned int, unsigned int, unsigned int> b_filters)
     {
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream    i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, std::get<0>(a_filters),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -524,7 +550,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(a_filters),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_weights.npy"),
@@ -535,9 +562,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_1a_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -548,7 +576,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 7U, 1U, std::get<1>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_weights.npy"),
@@ -559,7 +588,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x7_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 1U, 7U, std::get<2>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_weights.npy"),
@@ -570,7 +600,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0c_7x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<3>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_weights.npy"),
@@ -581,9 +612,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_1a_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << PoolingLayer(PoolingLayerInfo(PoolingType::MAX, 3, PadStrideInfo(2, 2, 0, 0, DimensionRoundingType::CEIL)))
             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::LINEAR, 1.f, 0.f));
 
@@ -605,7 +637,7 @@ private:
         }
 
         std::string total_path = "/cnn_data/inceptionv3_model/" + param_path + "_";
-        SubGraph    i_a;
+        SubStream    i_a(graph);
         i_a << ConvolutionLayer(
                 1U, 1U, a_filt,
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_weights.npy"),
@@ -616,9 +648,10 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_0_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b1;
+        SubStream i_b1(graph);
         i_b1 << ConvolutionLayer(
                  3U, 1U, std::get<1>(b_filters),
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_weights.npy"),
@@ -629,9 +662,10 @@ private:
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_moving_variance.npy"),
                  get_random_accessor(1.f, 1.f),
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0b_1x3_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b2;
+        SubStream i_b2(graph);
         i_b2 << ConvolutionLayer(
                  1U, 3U, std::get<2>(b_filters),
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_weights.npy"),
@@ -642,9 +676,10 @@ private:
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_moving_variance.npy"),
                  get_random_accessor(1.f, 1.f),
                  get_weights_accessor(data_path, total_path + "Branch_1_Conv2d" + conv_id + "3x1_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_b;
+        SubStream i_b(graph);
         i_b << ConvolutionLayer(
                 1U, 1U, std::get<0>(b_filters),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_weights.npy"),
@@ -655,10 +690,11 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_1_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_b1), std::move(i_b2));
 
-        SubGraph i_c1;
+        SubStream i_c1(graph);
         i_c1 << ConvolutionLayer(
                  3U, 1U, std::get<2>(c_filters),
                  get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_weights.npy"),
@@ -669,9 +705,10 @@ private:
                  get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_moving_variance.npy"),
                  get_random_accessor(1.f, 1.f),
                  get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0c_1x3_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c2;
+        SubStream i_c2(graph);
         i_c2 << ConvolutionLayer(
                  1U, 3U, std::get<3>(c_filters),
                  get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_weights.npy"),
@@ -682,9 +719,10 @@ private:
                  get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_moving_variance.npy"),
                  get_random_accessor(1.f, 1.f),
                  get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0d_3x1_BatchNorm_beta.npy"),
-                 0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                 0.001f)
+             << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
-        SubGraph i_c;
+        SubStream i_c(graph);
         i_c << ConvolutionLayer(
                 1U, 1U, std::get<0>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_weights.npy"),
@@ -695,7 +733,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0a_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << ConvolutionLayer(
                 3U, 3U, std::get<1>(c_filters),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_weights.npy"),
@@ -706,10 +745,11 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_2_Conv2d_0b_3x3_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
             << BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_c1), std::move(i_c2));
 
-        SubGraph i_d;
+        SubStream i_d(graph);
         i_d << PoolingLayer(PoolingLayerInfo(PoolingType::AVG, 3, PadStrideInfo(1, 1, 1, 1, DimensionRoundingType::CEIL), true))
             << ConvolutionLayer(
                 1U, 1U, d_filt,
@@ -721,7 +761,8 @@ private:
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_moving_variance.npy"),
                 get_random_accessor(1.f, 1.f),
                 get_weights_accessor(data_path, total_path + "Branch_3_Conv2d_0b_1x1_BatchNorm_beta.npy"),
-                0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
+                0.001f)
+            << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));
 
         return BranchLayer(BranchMergeMethod::DEPTH_CONCATENATE, std::move(i_a), std::move(i_b), std::move(i_c), std::move(i_d));
     }
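
Most hunks in this file are a single pattern: in v18.05,
BatchNormalizationLayer no longer accepts a fused ActivationLayerInfo, so the
activation becomes an explicit node. Schematically, with the accessor helpers
used throughout this file (the .npy path here is the generic shape of the
real ones above):

// Before (pre-18.05): activation fused into the batch-norm node
//   << BatchNormalizationLayer(mean, variance, gamma, beta,
//        0.001f, ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU))
// After (18.05): epsilon ends the argument list; RELU is its own node
graph << BatchNormalizationLayer(
         get_weights_accessor(data_path, total_path + "BatchNorm_moving_mean.npy"),
         get_weights_accessor(data_path, total_path + "BatchNorm_moving_variance.npy"),
         get_random_accessor(1.f, 1.f),
         get_weights_accessor(data_path, total_path + "BatchNorm_beta.npy"),
         0.001f)
      << ActivationLayer(ActivationLayerInfo(ActivationLayerInfo::ActivationFunction::RELU));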
diff --git a/benchmark/acl/benchmark_mobilenet.cpp b/benchmark/acl/benchmark_mobilenet.cpp
index 8932bf6..aecb62a 100644
  * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  * SOFTWARE.
  */
-#include "arm_compute/graph/Graph.h"
-#include "arm_compute/graph/Nodes.h"
+#include "arm_compute/graph.h"
 
 #include "Benchmark.h"
 
 #include <cstdlib>
 
-using namespace arm_compute::graph;
+using namespace arm_compute::graph::frontend;
 
 namespace
 {
@@ -37,13 +36,13 @@ namespace
       PadStrideInfo dwc_pad_stride_info, PadStrideInfo conv_pad_stride_info)
   {
     std::string total_path = "/cnn_data/mobilenet_v1_model/" + param_path + "_";
-    SubGraph    sg;
+    Stream graph{0, "BENCHMARK_MOBILENET"};
+    SubStream    sg(graph);
     sg << DepthwiseConvolutionLayer(
         3U, 3U,
         get_accessor<InputAccessor>(),
         std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
-        dwc_pad_stride_info,
-        true)
+        dwc_pad_stride_info)
       << BatchNormalizationLayer(
           get_accessor<InputAccessor>(),
           get_accessor<InputAccessor>(),
@@ -84,7 +83,7 @@ void main_graph_mobilenet(int argc, const char **argv)
   constexpr float mean_b = 104.01f; /* Mean value to subtract from blue channel */
 
   // Set target. 0 (NEON), 1 (OpenCL). By default it is NEON
-  TargetHint target_hint = set_target_hint(argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0);
+  Target target_hint = set_target_hint(argc > 1 ? std::strtol(argv[1], nullptr, 10) : 0);
 
   // Parse arguments
   if(argc < 2)
@@ -118,10 +117,10 @@ void main_graph_mobilenet(int argc, const char **argv)
     label     = argv[4];
   }
 
-  Graph graph;
+  Stream graph{0, "BENCHMARK_MOBILENET"};
 
   graph << target_hint
-    << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
+    << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
         get_accessor<InputAccessor>())
     << ConvolutionLayer(
         3U, 3U, 32U,
@@ -157,7 +156,7 @@ void main_graph_mobilenet(int argc, const char **argv)
         PadStrideInfo(1, 1, 0, 0))
     << ReshapeLayer(TensorShape(1001U))
     << SoftmaxLayer()
-    << Tensor(get_accessor<OutputAccessor>());
+    << OutputLayer(get_accessor<OutputAccessor>());
 
   run_benchmark(graph);
 }
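
Besides the Graph-to-Stream renames, the depthwise node here loses its
trailing boolean (an optimisation hint in the pre-18.05 graph API, to the
best of our reading; the patch simply drops it). Sketch of the updated block,
as in the depthwise helper above:

// Old graph API: DepthwiseConvolutionLayer(3U, 3U, weights, biases,
//                                          dwc_pad_stride_info, true)
// 18.05 frontend: the flag is gone; kernel selection is left to the backend
sg << DepthwiseConvolutionLayer(
      3U, 3U,
      get_accessor<InputAccessor>(),
      std::unique_ptr<arm_compute::graph::ITensorAccessor>(nullptr),
      dwc_pad_stride_info);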
diff --git a/contrib/bindacl/src/nnapi_acl.cc b/contrib/bindacl/src/nnapi_acl.cc
index 7997960..d943246 100644
@@ -47,8 +47,8 @@
 #include <memory>
 #include <boost/format.hpp>
 // ACL Headers
-#include <arm_compute/graph/Graph.h>
-#include <arm_compute/graph/Nodes.h>
+#include <arm_compute/graph.h>
+#include <arm_compute/core/TensorInfo.h>
 
 #include "util/environment.h"
 
@@ -172,7 +172,7 @@ struct ANeuralNetworksExecution
 {
   // ANeuralNetworksExecution corresponds to NPU::Interp::Session
 
-  arm_compute::graph::Graph graph;
+  arm_compute::graph::frontend::Stream graph{0, "BIND_ACL"};
 };
 
 class DummyInputAccessor : public arm_compute::graph::ITensorAccessor
@@ -211,24 +211,23 @@ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, ANe
   *execution = new ANeuralNetworksExecution;
 
   using arm_compute::DataType;
-  using arm_compute::graph::Tensor;
-  using arm_compute::graph::TargetHint;
-  using arm_compute::graph::Graph;
-  using arm_compute::TensorInfo;
+  using arm_compute::graph::Target;
+  using arm_compute::graph::TensorDescriptor;
   using arm_compute::TensorShape;
+  using arm_compute::graph::frontend::InputLayer;
+  using arm_compute::graph::frontend::OutputLayer;
 
   ANeuralNetworksExecution* execlocal = *execution;
-  arm_compute::graph::Graph& graph = execlocal->graph;
+  arm_compute::graph::frontend::Stream& graph = execlocal->graph;
 
-  TargetHint target_hint = nnfw::util::get_env_int("NNFW_ACL_USENEON")
-                           ? TargetHint::NEON : TargetHint::OPENCL;
+  Target target_hint = nnfw::util::get_env_int("NNFW_ACL_USENEON")
+                           ? Target::NEON : Target::CL;
 
   graph << target_hint
-        << Tensor(TensorInfo(TensorShape(224U, 224U, 3U, 1U), 1, DataType::F32),
+        << InputLayer(TensorDescriptor(TensorShape(224U, 224U, 3U, 1U), DataType::F32),
                   std::unique_ptr<DummyInputAccessor>(new DummyInputAccessor()))
-        << arm_compute::graph::SoftmaxLayer()
-        << Tensor(std::unique_ptr<DummyOutputAccessor>(new DummyOutputAccessor()))
-        ;
+        << arm_compute::graph::frontend::SoftmaxLayer()
+        << OutputLayer((std::unique_ptr<DummyOutputAccessor>(new DummyOutputAccessor())));
 
   std::cout << __FUNCTION__ << " ---" << std::endl;
   return ANEURALNETWORKS_NO_ERROR;
@@ -252,7 +251,7 @@ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, A
 
   // graph.run() fails with a segmentation fault when only target_hint is added.
   // Once adding a 'Tensor' is fixed, we may call graph.run()
-  arm_compute::graph::Graph& graph = execution->graph;
+  arm_compute::graph::frontend::Stream& graph = execution->graph;
   graph.run();
 
   std::cout << __FUNCTION__ << " ---" << std::endl;
diff --git a/contrib/convacl/src/io_accessor.cc b/contrib/convacl/src/io_accessor.cc
index 8ed4203..b7fdee7 100644
@@ -38,6 +38,7 @@
  * SOFTWARE.
  */
 #include "io_accessor.h"
+#include <arm_compute/core/Helpers.h>
 #include <ostream>
 
 bool InputAccessor::access_tensor(arm_compute::ITensor &tensor)
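
The new Helpers.h include supplies the window iteration utilities that
accessors typically use. A sketch of such an access_tensor body (illustrative
only; the real implementation in io_accessor.cc may differ):

#include <arm_compute/core/Helpers.h>
#include <arm_compute/core/ITensor.h>
#include <arm_compute/core/Window.h>

bool fill_with_ones(arm_compute::ITensor &tensor)
{
  // Build a window covering every element of the tensor
  arm_compute::Window window;
  window.use_tensor_dimensions(tensor.info()->tensor_shape());

  // Visit each coordinate and write through the element pointer
  arm_compute::execute_window_loop(window, [&](const arm_compute::Coordinates &id) {
    *reinterpret_cast<float *>(tensor.ptr_to_element(id)) = 1.f;
  });
  return true;
}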
diff --git a/contrib/convacl/src/nnapi_acl_conv.cc b/contrib/convacl/src/nnapi_acl_conv.cc
index 38ebb68..091d194 100644
@@ -46,8 +46,7 @@
 #include <memory>
 #include <boost/format.hpp>
 // ACL Headers
-#include <arm_compute/graph/Graph.h>
-#include <arm_compute/graph/Nodes.h>
+#include <arm_compute/graph.h>
 
 #include "util/environment.h"
 #include "io_accessor.h"
@@ -172,7 +171,7 @@ struct ANeuralNetworksExecution
 {
   // ANeuralNetworksExecution corresponds to NPU::Interp::Session
 
-  arm_compute::graph::Graph graph;
+  arm_compute::graph::frontend::Stream graph{0, "ACL_CONV"};
 };
 
 int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, ANeuralNetworksExecution** execution)
@@ -181,30 +180,29 @@ int ANeuralNetworksExecution_create(ANeuralNetworksCompilation* compilation, ANe
   *execution = new ANeuralNetworksExecution;
 
   using arm_compute::DataType;
-  using arm_compute::graph::Tensor;
-  using arm_compute::graph::TargetHint;
-  using arm_compute::graph::Graph;
-  using arm_compute::TensorInfo;
+  using arm_compute::graph::Target;
+  using arm_compute::graph::TensorDescriptor;
   using arm_compute::TensorShape;
+  using arm_compute::graph::frontend::InputLayer;
+  using arm_compute::graph::frontend::OutputLayer;
 
   ANeuralNetworksExecution* execlocal = *execution;
-  arm_compute::graph::Graph& graph = execlocal->graph;
+  arm_compute::graph::frontend::Stream& graph = execlocal->graph;
 
-  TargetHint target_hint = nnfw::util::get_env_int("NNFW_ACL_USENEON")
-                           ? TargetHint::NEON : TargetHint::OPENCL;
+  Target target_hint = nnfw::util::get_env_int("NNFW_ACL_USENEON")
+                           ? Target::NEON : Target::CL;
   bool autoinc = nnfw::util::get_env_bool("NNFW_TEST_AUTOINC");
 
   graph << target_hint
-        << Tensor(TensorInfo(TensorShape(3U, 3U, 1U, 1U), 1, DataType::F32),
+        << InputLayer(TensorDescriptor(TensorShape(3U, 3U, 1U, 1U), DataType::F32),
                   std::unique_ptr<InputAccessor>(new InputAccessor(autoinc)))
-        << arm_compute::graph::ConvolutionLayer(
+        << arm_compute::graph::frontend::ConvolutionLayer(
               3U, 3U, 1U,
               std::unique_ptr<WeightAccessor>(new WeightAccessor(autoinc)),
               std::unique_ptr<BiasAccessor>(new BiasAccessor()),
               arm_compute::PadStrideInfo(1, 1, 0, 0))
-        << Tensor(
+        << OutputLayer(
               std::unique_ptr<OutputAccessor>(new OutputAccessor()));
-        ;
 
   std::cout << __FUNCTION__ << " ---" << std::endl;
   return ANEURALNETWORKS_NO_ERROR;
@@ -228,7 +226,7 @@ int ANeuralNetworksExecution_startCompute(ANeuralNetworksExecution* execution, A
 
   // graph.run() fails with a segmentation fault when only target_hint is added.
   // Once adding a 'Tensor' is fixed, we may call graph.run()
-  arm_compute::graph::Graph& graph = execution->graph;
+  arm_compute::graph::frontend::Stream& graph = execution->graph;
   graph.run();
 
   std::cout << __FUNCTION__ << " ---" << std::endl;
diff --git a/contrib/kerneltesting/conv2d/nnfw_conv2d_test.cpp b/contrib/kerneltesting/conv2d/nnfw_conv2d_test.cpp
index 1d9c24c..04b2175 100644
@@ -50,8 +50,7 @@
 #include "optimized_ops.h"
 #include "OperationUtils.h"
 
-#include <arm_compute/graph/Graph.h>
-#include <arm_compute/graph/Nodes.h>
+#include <arm_compute/graph.h>
 
 #include <arm_compute/runtime/CL/CLFunctions.h>
 #include <arm_compute/runtime/CL/functions/CLConvolution.h>
@@ -130,11 +129,11 @@ bool convFloat32(const float* inputData, const Shape& inputShape,
 //-----------------------------------------------------------------------------
 
 using arm_compute::DataType;
-using arm_compute::graph::Tensor;
-using arm_compute::graph::TargetHint;
-using arm_compute::graph::Graph;
-using arm_compute::TensorInfo;
+using arm_compute::graph::Target;
+using arm_compute::graph::TensorDescriptor;
 using arm_compute::TensorShape;
+using arm_compute::graph::frontend::InputLayer;
+using arm_compute::graph::frontend::OutputLayer;
 
 namespace acl_graph {
 
@@ -148,10 +147,10 @@ bool convFloat32(const float* inputData, const Shape& inputShape,
                  float* outputData, const Shape& outputShape)
 {
   // Try with simple build-run with ACL Layer
-  arm_compute::graph::Graph graph;
+  arm_compute::graph::frontend::Stream graph{0, "ACL_CONV2D_TEST"};
 
-  TargetHint target_hint = nnfw::util::get_env_int("NNFW_ACL_USENEON")
-                           ? TargetHint::NEON : TargetHint::OPENCL;
+  Target target_hint = nnfw::util::get_env_int("NNFW_ACL_USENEON")
+                           ? Target::NEON : Target::CL;
 
   // Not sure about which index is which value
   uint32_t tsi_c = getSizeOfDimension(inputShape, 0);
@@ -164,9 +163,9 @@ bool convFloat32(const float* inputData, const Shape& inputShape,
   uint32_t tsk_n = getSizeOfDimension(filterShape, 3);
 
   graph << target_hint
-        << Tensor(TensorInfo(TensorShape(tsi_w, tsi_h, tsi_c, tsi_n), 1, DataType::F32),
+        << InputLayer(TensorDescriptor(TensorShape(tsi_w, tsi_h, tsi_c, tsi_n), DataType::F32),
                   std::unique_ptr<InputAccessor>(new InputAccessor(inputData, inputShape)))
-        << arm_compute::graph::ConvolutionLayer(
+        << arm_compute::graph::frontend::ConvolutionLayer(
               tsk_w, tsk_h, tsk_n,
               std::unique_ptr<WeightAccessor>(new WeightAccessor(filterData, filterShape)),
               std::unique_ptr<BiasAccessor>(new BiasAccessor(biasData, biasShape)),
@@ -176,16 +175,16 @@ bool convFloat32(const float* inputData, const Shape& inputShape,
     arm_compute::ActivationLayerInfo::ActivationFunction actFunc =
         arm_compute::ActivationLayerInfo::ActivationFunction::RELU;
 
-    graph << arm_compute::graph::ActivationLayer(arm_compute::ActivationLayerInfo(actFunc));
+    graph << arm_compute::graph::frontend::ActivationLayer(arm_compute::ActivationLayerInfo(actFunc));
     // Activation does not provide an output Tensor, which makes the next layer fail to be added to the graph
     // when it is the last (output) layer. To solve this, we need to add a dummy layer.
     uint32_t tso_c = getSizeOfDimension(outputShape, 0);
     uint32_t tso_h = getSizeOfDimension(outputShape, 1);
     uint32_t tso_w = getSizeOfDimension(outputShape, 2);
     uint32_t tso_n = getSizeOfDimension(outputShape, 3);
-    graph << arm_compute::graph::ReshapeLayer(TensorShape(tso_w, tso_h, tso_c, tso_n));
+    graph << arm_compute::graph::frontend::ReshapeLayer(TensorShape(tso_w, tso_h, tso_c, tso_n));
   }
-  graph << Tensor(std::unique_ptr<OutputAccessor>(new OutputAccessor(outputData, outputShape)))
+  graph << OutputLayer(std::unique_ptr<OutputAccessor>(new OutputAccessor(outputData, outputShape)))
         ;
 
   graph.run();
@@ -197,6 +196,8 @@ bool convFloat32(const float* inputData, const Shape& inputShape,
 
 //-----------------------------------------------------------------------------
 
+using arm_compute::TensorInfo;
+
 namespace acl_runtime {
 
 TensorShape calculate_convolution_layer_output_shape(
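
Putting the tail of convFloat32 together: when RELU is the last node, the
ReshapeLayer acts as a pass-through so the OutputLayer has a producer.
Reduced sketch (the needs_relu flag is hypothetical; the file derives it
from the operation's parameters):

if (needs_relu)
{
  graph << arm_compute::graph::frontend::ActivationLayer(
             arm_compute::ActivationLayerInfo(
                 arm_compute::ActivationLayerInfo::ActivationFunction::RELU))
        // Dummy pass-through: reshape to the unchanged output shape so the
        // following OutputLayer can attach
        << arm_compute::graph::frontend::ReshapeLayer(TensorShape(tso_w, tso_h, tso_c, tso_n));
}
graph << OutputLayer(std::unique_ptr<OutputAccessor>(new OutputAccessor(outputData, outputShape)));

graph.run();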
diff --git a/externals/acl b/externals/acl
index 6778eb1..a5793e6 160000
@@ -1 +1 @@
-Subproject commit 6778eb173ae3b64b06e3626891052d3e1fb964f4
+Subproject commit a5793e66f2a660a3bf0512165bd92d15767de3fe