[enco.caffe] Support IRGen over Concat layer (#1408)
author Jonghyun Park / Motion Control Lab (SR) / Staff Engineer / Samsung Electronics <jh1302.park@samsung.com>
Fri, 7 Sep 2018 07:48:42 +0000 (16:48 +0900)
committer GitHub Enterprise <noreply-CODE@samsung.com>
Fri, 7 Sep 2018 07:48:42 +0000 (16:48 +0900)
With this commit, the enco caffe frontend is now able to generate coco IR
for the Concat layer.
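
Concatenation along the default axis (axis 1, channels) only changes that one
dimension of the output shape; every other dimension must match across the
inputs. A minimal standalone sketch of that shape rule, assuming a hypothetical
concat_shape helper (illustration only, not the ConcatSpec API used in the
patch below):

  #include <cassert>
  #include <cstdint>
  #include <vector>

  using Shape = std::vector<uint32_t>; // e.g. {N, C, H, W}

  // Output shape of Concat: copy the first input shape, then replace the
  // concat axis with the sum of all inputs' sizes along that axis.
  Shape concat_shape(uint32_t axis, const std::vector<Shape> &in_shapes)
  {
    Shape out = in_shapes.at(0);
    out[axis] = 0;
    for (const auto &s : in_shapes)
    {
      assert(s.size() == out.size()); // all inputs must have the same rank
      out[axis] += s[axis];
    }
    return out;
  }

  // Example: concat_shape(1, {{1, 2, 8, 8}, {1, 3, 8, 8}}) yields {1, 5, 8, 8}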

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
contrib/enco/frontend/caffe/src/Frontend.cpp

index 1d46cfe..3998ec0 100644
@@ -1,6 +1,7 @@
 #include "Frontend.h"
 #include "ConvolutionSpec.h"
 #include "PoolingSpec.h"
+#include "ConcatSpec.h"
 
 #include <nncc/core/ADT/feature/CHWLayout.h>
 #include <nncc/core/ADT/kernel/Shape.h>
@@ -8,6 +9,7 @@
 #include <nncc/core/ADT/kernel/NCHWLayout.h>
 #include <nncc/core/ADT/tensor/Shape.h>
 #include <nncc/core/ADT/tensor/LexicalLayout.h>
+#include <nncc/core/ADT/tensor/IndexEnumerator.h>
 
 #include <morph/caffe.h>
 
@@ -402,6 +404,76 @@ enco::Bundle Frontend::load(void) const
       bag_ctx[ofm_name] = ofm_bag;
       shape_ctx[ofm_name] = ofm_shape;
     }
+    else if (layer.type() == "Concat")
+    {
+      assert(layer.bottom().size() > 0);
+      assert(layer.top().size() == 1);
+
+      // Assume default concat axis
+      // - Please refer to http://caffe.berkeleyvision.org/tutorial/layers/concat.html for details
+      // TODO Get concat axis from concat param
+      assert(!layer.has_concat_param());
+      const uint32_t concat_axis = 1;
+
+      // Construct a vector of input shapes
+      std::vector<tensor::Shape> in_shapes;
+      {
+        for (const auto &name : layer.bottom())
+        {
+          in_shapes.emplace_back(shape_ctx.at(name));
+        }
+      }
+
+      // Estimate output shape
+      const auto out_name = layer.top(0);
+      tensor::Shape out_shape = concat_spec(concat_axis).forward(in_shapes);
+
+      // Create an output bag
+      auto out_bag = m->entity()->bag()->create(num_elements(out_shape));
+
+      // Update coco IR
+      //
+      // NOTE The current implementation lowers "Concat" into a sequence of Shuffle instructions
+      uint32_t out_base = 0;
+
+      for (uint32_t n = 0; n < in_shapes.size(); ++n)
+      {
+        const auto &in_name = layer.bottom(n);
+        const auto &in_shape = in_shapes.at(n);
+        auto in_bag = bag_ctx.at(in_name);
+
+        // Create a Shuffle instruction
+        auto shuffle = m->entity()->instr()->create<coco::Shuffle>();
+
+        shuffle->from(in_bag);
+        shuffle->into(out_bag);
+
+        for (tensor::IndexEnumerator e{in_shape}; e.valid(); e.advance())
+        {
+          // NOTE Caffe uses lexical layout
+          static const LexicalLayout l;
+          const auto &in_index = e.current();
+
+          auto out_index = e.current();
+          out_index.at(concat_axis) += out_base;
+
+          const auto in_elem = static_cast<uint32_t>(l.offset(in_shape, in_index));
+          const auto out_elem = static_cast<uint32_t>(l.offset(out_shape, out_index));
+
+          shuffle->insert(coco::ElemID{in_elem}, coco::ElemID{out_elem});
+        }
+
+        // Append the constructed Shuffle instruction
+        blk->instr()->append(shuffle);
+
+        // Update out base
+        out_base += in_shape.dim(concat_axis);
+      }
+
+      // Update bag and shape context
+      bag_ctx[out_name] = out_bag;
+      shape_ctx[out_name] = out_shape;
+    }
     else
     {
       throw std::runtime_error{"Not supported: " + layer.type()};
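
For reference, the element mapping that each Shuffle instruction above encodes
can be reproduced outside coco IR. The sketch below is illustrative only
(plain C++, no coco/nncc dependency; Shape, lexical_offset, and the hand-rolled
enumeration loop are stand-ins for tensor::Shape, LexicalLayout, and
IndexEnumerator): it walks every index of each input, shifts the concat axis by
the running out_base, and prints the resulting input-offset -> output-offset
pairs, mirroring the arithmetic in the patch.

  #include <cstddef>
  #include <cstdint>
  #include <iostream>
  #include <vector>

  using Shape = std::vector<uint32_t>;

  // Row-major ("lexical") offset of an index within a shape
  uint32_t lexical_offset(const Shape &shape, const Shape &index)
  {
    uint32_t off = 0;
    for (std::size_t axis = 0; axis < shape.size(); ++axis)
    {
      off = off * shape[axis] + index[axis];
    }
    return off;
  }

  int main()
  {
    const uint32_t concat_axis = 1; // Caffe's default concat axis (channels)

    // Two inputs of shape 1x2x2x2 and 1x3x2x2 concatenate into 1x5x2x2
    const std::vector<Shape> in_shapes{{1, 2, 2, 2}, {1, 3, 2, 2}};
    const Shape out_shape{1, 5, 2, 2};

    uint32_t out_base = 0;
    for (const auto &in_shape : in_shapes)
    {
      // Enumerate every index of this input (plays the role of IndexEnumerator)
      Shape index(in_shape.size(), 0);
      bool done = false;
      while (!done)
      {
        // Shift the concat axis by the running base, as the Shuffle loop does
        Shape out_index = index;
        out_index[concat_axis] += out_base;
        std::cout << lexical_offset(in_shape, index) << " -> "
                  << lexical_offset(out_shape, out_index) << '\n';

        // Advance the index lexicographically (last axis fastest)
        for (int axis = static_cast<int>(index.size()) - 1;; --axis)
        {
          if (axis < 0) { done = true; break; }
          if (++index[axis] < in_shape[axis]) break;
          index[axis] = 0;
        }
      }
      out_base += in_shape[concat_axis];
    }
    return 0;
  }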