--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * @note This cpp file exists to check compilation integrity (i.e. that Context.h is self-contained)
+ */
+
+#include "Context.h"
--- /dev/null
+/*
+ * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __CONTEXT_H__
+#define __CONTEXT_H__
+
+#include <caffe/proto/caffe.pb.h>
+
+#include <cassert>
+#include <cstdint>
+#include <map>
+#include <string>
+
+namespace caffeimport
+{
+
+using LayerName = std::string;
+
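+/**
+ * @brief Grants access to the weight blobs of a caffemodel, indexed by layer name
+ *
+ * A usage sketch (the layer name "conv1" is a hypothetical example):
+ *
+ *   caffeimport::WeightContext weight_ctx{&caffemodel};
+ *
+ *   assert(weight_ctx.blob_count("conv1") >= 1);
+ *   auto ker_blob = weight_ctx.blob_get("conv1", 0);
+ */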
+class WeightContext
+{
+public:
+  explicit WeightContext(::caffe::NetParameter *caffemodel) : _caffemodel(caffemodel)
+  {
+    // Index every named layer so that its weight blobs can be looked up by name
+    for (int n = 0; n < _caffemodel->layer_size(); ++n)
+    {
+      auto layer = _caffemodel->mutable_layer(n);
+
+      if (layer->has_name())
+      {
+        _data[layer->name()] = layer;
+      }
+    }
+  }
+
+public:
+  // Returns the number of weight blobs stored under the given layer name
+  int blob_count(const LayerName &name)
+  {
+    auto it = _data.find(name);
+
+    if (it != _data.end())
+      return it->second->blobs_size();
+
+    // Asking for the weights of an unknown layer indicates a bug in the caller
+    assert(false);
+    return 0;
+  }
+
+  // Returns a mutable pointer to the n-th weight blob of the given layer
+  ::caffe::BlobProto *blob_get(const LayerName &name, uint32_t n)
+  {
+    auto it = _data.find(name);
+
+    if (it != _data.end())
+      return it->second->mutable_blobs(n);
+
+    assert(false);
+    return nullptr;
+  }
+
+private:
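+  // Borrowed pointer; the caller retains ownership of the caffemodel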
+ ::caffe::NetParameter *_caffemodel;
+ std::map<LayerName, ::caffe::LayerParameter *> _data;
+};
+
+} // namespace caffeimport
+
+#endif // __CONTEXT_H__
#include "ConvolutionSpec.h"
#include "PoolingSpec.h"
#include "ConcatSpec.h"
+#include "Context.h"
#include <coco/IR/FeatureLayouts.h>
#include <coco/IR/KernelLayouts.h>
auto d = coco::Data::create();
// For weight access
- std::map<std::string, ::caffe::LayerParameter *> weight_ctx;
-
- for (uint32_t n = 0; n < _caffemodel->layer_size(); ++n)
- {
- auto layer = _caffemodel->mutable_layer(n);
-
- if (layer->has_name())
- {
- weight_ctx[layer->name()] = layer;
- }
- }
-
- auto blob_count = [&weight_ctx](const std::string &name) {
- return weight_ctx.at(name)->blobs_size();
- };
-
- auto blob_get = [&weight_ctx](const std::string &name, uint32_t n) {
- return weight_ctx.at(name)->mutable_blobs(n);
- };
+ caffeimport::WeightContext weight_ctx(_caffemodel.get());
// For inter-layer communication
std::map<std::string, tensor::Shape> shape_ctx;
d->f32()->allocate(ker_obj);
// Initialize the kernel overlay
- assert(blob_count(layer.name()) >= 1);
- auto ker_blob = blob_get(layer.name(), 0);
+ assert(weight_ctx.blob_count(layer.name()) >= 1);
+ auto ker_blob = weight_ctx.blob_get(layer.name(), 0);
assert(ker_shape == as_tensor_shape(ker_blob));
//
if (param.bias_term())
{
- assert(blob_count(layer.name()) >= 2);
+ assert(weight_ctx.blob_count(layer.name()) >= 2);
// Create Bag & Object
auto bias_bag = m->entity()->bag()->create(ker_shape.dim(0));
d->f32()->allocate(bias_bag);
auto bias_span = d->f32()->weight(bias_bag);
- auto bias_blob = blob_get(layer.name(), 1);
+ auto bias_blob = weight_ctx.blob_get(layer.name(), 1);
for (uint32_t ch = 0; ch < ker_obj->shape().count(); ++ch)
{
assert(param.axis() == 1);
assert(!param.has_num_axes());
- assert(blob_count(layer.name()) >= 1);
+ assert(weight_ctx.blob_count(layer.name()) >= 1);
// NOTE The shape of "Scale" output is same as that of its input
// NOTE The current implementation assumes that input/output is of feature type
d->f32()->allocate(factor_bag);
auto span = d->f32()->weight(factor_bag);
- auto blob = blob_get(layer.name(), 0);
+ auto blob = weight_ctx.blob_get(layer.name(), 0);
for (uint32_t ch = 0; ch < factor_obj->shape().depth(); ++ch)
{
// Create bias addition (as channel-wise addition)
if (param.bias_term())
{
- assert(blob_count(layer.name()) >= 2);
+ assert(weight_ctx.blob_count(layer.name()) >= 2);
auto in_bag = last_bag; /* Use the output of the last computation as an input */
auto in_obj = m->entity()->object()->create<coco::FeatureObject>();
d->f32()->allocate(bias_bag);
auto bias_span = d->f32()->weight(bias_bag);
- auto bias_blob = blob_get(layer.name(), 1);
+ auto bias_blob = weight_ctx.blob_get(layer.name(), 1);
for (uint32_t ch = 0; ch < bias_obj->shape().depth(); ++ch)
{