From: 박종현/동작제어Lab(SR)/Staff Engineer/삼성전자 <jh1302.park@samsung.com>
Date: Fri, 7 Sep 2018 07:48:42 +0000 (+0900)
Subject: [enco.caffe] Support IRGen over Concat layer (#1408)
X-Git-Tag: nncc_backup~1886
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=680500ac46a44e38d8fff8291cfaec2ecc0e6da3;p=platform%2Fcore%2Fml%2Fnnfw.git

[enco.caffe] Support IRGen over Concat layer (#1408)

With this commit, enco caffe frontend is now able to generate coco IR for
Concat layer.

Signed-off-by: Jonghyun Park <jh1302.park@samsung.com>
---

diff --git a/contrib/enco/frontend/caffe/src/Frontend.cpp b/contrib/enco/frontend/caffe/src/Frontend.cpp
index 1d46cfe..3998ec0 100644
--- a/contrib/enco/frontend/caffe/src/Frontend.cpp
+++ b/contrib/enco/frontend/caffe/src/Frontend.cpp
@@ -1,6 +1,7 @@
 #include "Frontend.h"
 #include "ConvolutionSpec.h"
 #include "PoolingSpec.h"
+#include "ConcatSpec.h"

 #include
 #include
@@ -8,6 +9,7 @@
 #include
 #include
 #include
+#include <nncc/core/ADT/tensor/IndexEnumerator.h>

 #include
@@ -402,6 +404,76 @@ enco::Bundle Frontend::load(void) const
       bag_ctx[ofm_name] = ofm_bag;
       shape_ctx[ofm_name] = ofm_shape;
     }
+    else if (layer.type() == "Concat")
+    {
+      assert(layer.bottom().size() > 0);
+      assert(layer.top().size() == 1);
+
+      // Assume default concat axis
+      // - Please refer to http://caffe.berkeleyvision.org/tutorial/layers/concat.html for details
+      // TODO Get concat axis from concat param
+      assert(!layer.has_concat_param());
+      const uint32_t concat_axis = 1;
+
+      // Construct a vector of input shapes
+      std::vector<tensor::Shape> in_shapes;
+      {
+        for (const auto &name : layer.bottom())
+        {
+          in_shapes.emplace_back(shape_ctx.at(name));
+        }
+      }
+
+      // Estimate output shape
+      const auto out_name = layer.top(0);
+      tensor::Shape out_shape = concat_spec(concat_axis).forward(in_shapes);
+
+      // Create an output bag
+      auto out_bag = m->entity()->bag()->create(num_elements(out_shape));
+
+      // Update coco IR
+      //
+      // NOTE The current implementation lowers "Concat" into a sequence of Shuffle instructions
+      uint32_t out_base = 0;
+
+      for (uint32_t n = 0; n < in_shapes.size(); ++n)
+      {
+        const auto &in_name = layer.bottom(n);
+        const auto &in_shape = in_shapes.at(n);
+        auto in_bag = bag_ctx.at(in_name);
+
+        // Create a Shuffle instruction
+        auto shuffle = m->entity()->instr()->create<coco::Shuffle>();
+
+        shuffle->from(in_bag);
+        shuffle->into(out_bag);
+
+        for (tensor::IndexEnumerator e{in_shape}; e.valid(); e.advance())
+        {
+          // NOTE Caffe use lexical layout
+          static const LexicalLayout l;
+          const auto &in_index = e.current();
+
+          auto out_index = e.current();
+          out_index.at(concat_axis) += out_base;
+
+          const auto in_elem = static_cast<uint32_t>(l.offset(in_shape, in_index));
+          const auto out_elem = static_cast<uint32_t>(l.offset(out_shape, out_index));
+
+          shuffle->insert(coco::ElemID{in_elem}, coco::ElemID{out_elem});
+        }
+
+        // Append the constructed Shuffle instruction
+        blk->instr()->append(shuffle);
+
+        // Update out base
+        out_base += in_shape.dim(concat_axis);
+      }
+
+      // Update bag and shape context
+      bag_ctx[out_name] = out_bag;
+      shape_ctx[out_name] = out_shape;
+    }
     else
     {
       throw std::runtime_error{"Not supported: " + layer.type()};