From: SaeHie Park/Motion Control Lab(SR)/Principal Engineer/Samsung Electronics
Date: Thu, 6 Dec 2018 04:41:36 +0000 (+0900)
Subject: [enco] frontend/caffe: Separate as_tensor_shape (#2520)
X-Git-Tag: nncc_backup~1182
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=4df85487963691634e525796e977166bf755f178;p=platform%2Fcore%2Fml%2Fnnfw.git

[enco] frontend/caffe: Separate as_tensor_shape (#2520)

* [enco] frontend/caffe: Separate as_tensor_shape

This will separate as_tensor_shape() into its own file under the
caffeimport namespace

Signed-off-by: SaeHie Park

* move using to global
---

diff --git a/contrib/enco/frontend/caffe/src/Convert.cpp b/contrib/enco/frontend/caffe/src/Convert.cpp
new file mode 100644
index 0000000..d697b1b
--- /dev/null
+++ b/contrib/enco/frontend/caffe/src/Convert.cpp
@@ -0,0 +1,40 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "Convert.h"
+
+using namespace nncc::core::ADT;
+
+namespace caffeimport
+{
+
+tensor::Shape as_tensor_shape(const ::caffe::BlobShape &blob_shape)
+{
+  const uint32_t rank = blob_shape.dim_size();
+
+  tensor::Shape res;
+
+  res.resize(rank);
+
+  for (uint32_t axis = 0; axis < rank; ++axis)
+  {
+    res.dim(axis) = blob_shape.dim(axis);
+  }
+
+  return res;
+}
+
+} // namespace caffeimport
diff --git a/contrib/enco/frontend/caffe/src/Convert.h b/contrib/enco/frontend/caffe/src/Convert.h
new file mode 100644
index 0000000..9f6f9f1
--- /dev/null
+++ b/contrib/enco/frontend/caffe/src/Convert.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *    http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONVERT_H__
+#define __CONVERT_H__
+
+#include <nncc/core/ADT/tensor/Shape.h>
+
+#include <caffe/proto/caffe.pb.h>
+
+namespace caffeimport
+{
+
+nncc::core::ADT::tensor::Shape as_tensor_shape(const ::caffe::BlobShape &blob_shape);
+
+inline nncc::core::ADT::tensor::Shape as_tensor_shape(const ::caffe::BlobProto *blob_proto)
+{
+  return as_tensor_shape(blob_proto->shape());
+}
+
+} // namespace caffeimport
+
+#endif // __CONVERT_H__
diff --git a/contrib/enco/frontend/caffe/src/Frontend.cpp b/contrib/enco/frontend/caffe/src/Frontend.cpp
index 94f15f0..01b9b18 100644
--- a/contrib/enco/frontend/caffe/src/Frontend.cpp
+++ b/contrib/enco/frontend/caffe/src/Frontend.cpp
@@ -19,6 +19,7 @@
 #include "PoolingSpec.h"
 #include "ConcatSpec.h"
 #include "Context.h"
+#include "Convert.h"
 
 #include
 #include
@@ -54,32 +55,6 @@
 using tensor::LexicalLayout;
 using nncc::foundation::make_unique;
 
-namespace
-{
-
-tensor::Shape as_tensor_shape(const ::caffe::BlobShape &blob_shape)
-{
-  const uint32_t rank = blob_shape.dim_size();
-
-  tensor::Shape res;
-
-  res.resize(rank);
-
-  for (uint32_t axis = 0; axis < rank; ++axis)
-  {
-    res.dim(axis) = blob_shape.dim(axis);
-  }
-
-  return res;
-}
-
-tensor::Shape as_tensor_shape(const ::caffe::BlobProto *blob_proto)
-{
-  return as_tensor_shape(blob_proto->shape());
-}
-
-} // namespace
-
 /**
  * coco IR builders
  */
@@ -297,7 +272,7 @@ enco::Bundle Frontend::load(void) const
     for (uint32_t n = 0; n < layer.top_size(); ++n)
     {
       const auto &name = layer.top(n);
-      const auto shape = as_tensor_shape(param.shape(n));
+      const auto shape = caffeimport::as_tensor_shape(param.shape(n));
 
       auto bag = m->entity()->bag()->create(num_elements(shape));
       auto input = m->entity()->input()->create(shape);
@@ -367,7 +342,7 @@
      assert(weight_ctx.blob_count(layer.name()) >= 1);
      auto ker_blob = weight_ctx.blob_get(layer.name(), 0);
 
-     assert(ker_shape == as_tensor_shape(ker_blob));
+     assert(ker_shape == caffeimport::as_tensor_shape(ker_blob));
 
      auto ker_dst = d->f32()->access(ker_obj);
      auto ker_src = kernel::OverlayFactory::make(
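
For reference, the relocated helper can be exercised on its own. The sketch below is not part of the commit: the standalone main() driver and the 1x3x224x224 dimensions are illustrative assumptions, while caffeimport::as_tensor_shape and the rank()/dim() accessors come from the code in this patch.

// Usage sketch (illustrative only): convert a caffe BlobShape into an
// nncc tensor::Shape through the new caffeimport helper.
#include "Convert.h"

#include <nncc/core/ADT/tensor/Shape.h>
#include <caffe/proto/caffe.pb.h>

#include <cassert>

int main()
{
  ::caffe::BlobShape blob_shape;

  // Describe a hypothetical 1x3x224x224 blob
  blob_shape.add_dim(1);
  blob_shape.add_dim(3);
  blob_shape.add_dim(224);
  blob_shape.add_dim(224);

  // The helper copies each protobuf dim into the corresponding tensor axis
  const auto shape = caffeimport::as_tensor_shape(blob_shape);

  assert(shape.rank() == 4);
  assert(shape.dim(3) == 224);

  return 0;
}

Frontend.cpp reaches the same helper through the caffeimport:: prefix, as the two call-site hunks above show.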