#include "Operand.h"
#include "Index.h"
+#include "util/ObjectManager.h"
namespace neurun
{
namespace model
{
-class Operands
+/**
+ * @brief Container that owns Operand objects keyed by OperandIndex
+ *
+ * All container behavior (emplace/push/remove/at/exist/iterate) is inherited
+ * from the generic util::ObjectManager base; the bespoke members below are
+ * removed in favor of it. Callers use emplace(shape, type) where they
+ * previously used append(shape, type).
+ */
+class Operands : public util::ObjectManager<OperandIndex, Operand>
{
-public:
- Operands() : _index_count(0) {}
-
-public:
- OperandIndex append(const Shape &, const TypeInfo &);
- void remove(const OperandIndex &index) { _objects.erase(index); };
-
-public:
- const Operand &at(const OperandIndex &) const;
- Operand &at(const OperandIndex &);
- bool exist(const OperandIndex &) const;
- void iterate(const std::function<void(const OperandIndex &, const Operand &)> &fn) const;
- void iterate(const std::function<void(const OperandIndex &, Operand &)> &fn);
-
-private:
- const OperandIndex generateIndex();
-
-private:
- std::unordered_map<OperandIndex, std::unique_ptr<Operand>> _objects;
- OperandIndex _index_count;
};
} // namespace model
--- /dev/null
+/*
+ * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef __NEURUN_UTIL_OBJECT_MANAGER_H__
+#define __NEURUN_UTIL_OBJECT_MANAGER_H__
+
+#include "cpp14/memory.h"
+
+#include <cstdint>
+#include <functional>
+#include <list>
+#include <memory>
+#include <unordered_map>
+
+namespace neurun
+{
+namespace util
+{
+
+/**
+ * @brief Class that owns objects and maps them with indices as a handle for them
+ *
+ */
+/**
+ * @brief Class that owns objects and maps them with indices as a handle for them
+ *
+ * @tparam Index  Handle type; must be hashable (it is the unordered_map key)
+ *                and constructible from a uint32_t counter value
+ * @tparam Object Owned object type
+ */
+template <typename Index, typename Object> class ObjectManager
+{
+public:
+  ObjectManager() : _index_count{0u} {}
+
+public:
+  /**
+   * @brief Create an object with args and put it in the container with a new Index for that
+   *
+   * @param[in] args Arguments for creating Object
+   * @return Created index that is associated to the object
+   */
+  template <class... Args> Index emplace(Args &&... args)
+  {
+    auto index = generateIndex();
+    _objects.emplace(index, nnfw::cpp14::make_unique<Object>(std::forward<Args>(args)...));
+    return index;
+  }
+
+  /**
+   * @brief Put object in the container with a new Index for that
+   *
+   * @param[in] object Object to be pushed
+   * @return Created index that is associated to the object
+   */
+  Index push(std::unique_ptr<Object> &&object)
+  {
+    auto index = generateIndex();
+    _objects.emplace(index, std::move(object));
+    return index;
+  }
+
+  /**
+   * @brief Remove the object that is associated with the given index
+   *
+   * @param[in] index Index of the object to be removed
+   * @return N/A
+   */
+  void remove(const Index &index) { _objects.erase(index); }
+
+  /**
+   * @brief Get the object that is associated with the given index
+   *
+   * @param[in] index Index of the object to be returned
+   * @return Object
+   */
+  const Object &at(const Index &index) const { return *(_objects.at(index)); }
+  /**
+   * @brief Get the object that is associated with the given index
+   *
+   * @param[in] index Index of the object to be returned
+   * @return Object
+   */
+  Object &at(const Index &index) { return *(_objects.at(index)); }
+  /**
+   * @brief Check whether an object is associated with the given index
+   *
+   * @param[in] index Index to be checked
+   * @return true if such entry exists otherwise false
+   */
+  bool exist(const Index &index) const { return _objects.find(index) != _objects.end(); }
+  /**
+   * @brief Iterate over the container with given function
+   *
+   * @param[in] fn Function to be run for every container entry
+   * @return N/A
+   */
+  void iterate(const std::function<void(const Index &, const Object &)> &fn) const
+  {
+    for (const auto &e : _objects)
+    {
+      fn(e.first, *e.second);
+    }
+  }
+  /**
+   * @brief Iterate over the container with given function
+   *
+   * Iterates over a snapshot of the keys so that @c fn may add entries while
+   * iterating (newly added entries are not visited in this pass). Entries
+   * removed by @c fn mid-iteration are skipped rather than dereferenced.
+   *
+   * @param[in] fn Function to be run for every container entry
+   * @return N/A
+   */
+  void iterate(const std::function<void(const Index &, Object &)> &fn)
+  {
+    // TODO Remove this workaround
+    // This implementation is a workaround in case of adding operands while iteration
+    std::list<Index> l;
+
+    for (const auto &e : _objects)
+    {
+      l.push_back(e.first);
+    }
+
+    for (const auto &index : l)
+    {
+      // Guard against `fn` erasing entries during the walk: operator[] on a
+      // missing key would default-construct a null unique_ptr and then
+      // dereference it (undefined behavior). Look the key up instead.
+      auto it = _objects.find(index);
+      if (it != _objects.end())
+      {
+        fn(index, *it->second);
+      }
+    }
+  }
+
+private:
+  // Monotonically increasing counter; never reuses an index, even after remove().
+  Index generateIndex() { return Index{_index_count++}; }
+
+private:
+  std::unordered_map<Index, std::unique_ptr<Object>> _objects;
+  uint32_t _index_count;
+};
+
+} // namespace util
+} // namespace neurun
+
+#endif // __NEURUN_UTIL_OBJECT_MANAGER_H__
model::OperandIndex Graph::addOperand(const model::Shape &shape, const model::TypeInfo &type)
{
- return _model->operands.append(shape, type);
+ return _model->operands.emplace(shape, type);
}
model::OperationIndex Graph::addOperation(std::unique_ptr<model::Operation> &&node)
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "model/Operands.h"
-
-#include "cpp14/memory.h"
-
-namespace neurun
-{
-namespace model
-{
-
-const OperandIndex Operands::generateIndex()
-{
- assert(_index_count.valid());
-
- return _index_count++;
-}
-
-OperandIndex Operands::append(const Shape &shape, const TypeInfo &type)
-{
- auto OperandIndex = generateIndex();
-
- _objects[OperandIndex] = nnfw::cpp14::make_unique<Operand>(shape, type);
-
- return OperandIndex;
-}
-
-const Operand &Operands::at(const OperandIndex &OperandIndex) const
-{
- return *(_objects.at(OperandIndex));
-}
-
-Operand &Operands::at(const OperandIndex &OperandIndex) { return *(_objects.at(OperandIndex)); }
-
-bool Operands::exist(const OperandIndex &OperandIndex) const
-{
- auto it = _objects.find(OperandIndex);
- return it != _objects.end();
-}
-
-void Operands::iterate(const std::function<void(const OperandIndex &, const Operand &)> &fn) const
-{
- for (const auto &e : _objects)
- {
- fn(e.first, *e.second);
- }
-}
-
-void Operands::iterate(const std::function<void(const OperandIndex &, Operand &)> &fn)
-{
- // TODO Remove this workaround
- // This implementation is a workaround in case of adding operands while iteration
- //
- // // Original Implementation (We probably should be back to this)
- // for (auto &e : _objects)
- // {
- // fn(e.first, *e.second);
- // }
-
- std::list<OperandIndex> l;
-
- for (auto &e : _objects)
- {
- l.push_back(e.first);
- }
-
- for (auto OperandIndex : l)
- {
- fn(OperandIndex, *_objects[OperandIndex]);
- }
-}
-
-} // namespace model
-} // namespace neurun
{
const auto shape = NNAPIConvert::getShape(type);
const auto typeInfo = NNAPIConvert::getTypeInfo(type);
- _model->operands.append(shape, typeInfo);
+ _model->operands.emplace(shape, typeInfo);
_operand_usages.emplace_back(OperandUsage::NOT_DEFINED);
}
catch (const std::exception &e)
::neurun::model::TypeInfo type{neurun::model::DataType::INT32};
- set.append(shape0, type);
- set.append(shape1, type);
+ set.emplace(shape0, type);
+ set.emplace(shape1, type);
ASSERT_EQ(set.exist(neurun::model::OperandIndex{0u}), true);
ASSERT_EQ(set.exist(neurun::model::OperandIndex{1u}), true);
neurun::model::TypeInfo type{neurun::model::DataType::INT32};
// Model Input/Output
- auto input_operand = model->operands.append(shape, type);
- auto output_operand = model->operands.append(shape, type);
+ auto input_operand = model->operands.emplace(shape, type);
+ auto output_operand = model->operands.emplace(shape, type);
model->inputs.append(input_operand);
model->outputs.append(output_operand);
// MockNode1
- auto operand_index1 = model->operands.append(shape, type);
+ auto operand_index1 = model->operands.emplace(shape, type);
auto mocknode_index1 = model->operations.append(
nnfw::cpp14::make_unique<MockNode>(IndexSet{input_operand}, IndexSet{operand_index1}));
// MockNode2
- auto operand_index2 = model->operands.append(shape, type);
+ auto operand_index2 = model->operands.emplace(shape, type);
auto mocknode_index2 = model->operations.append(
nnfw::cpp14::make_unique<MockNode>(IndexSet{input_operand}, IndexSet{operand_index2}));
// Add Conv
using GraphNode = neurun::model::operation::Conv2DNode;
- auto input_operand = model.operands.append(shape, type);
- auto kernel_operand = model.operands.append(shape, type);
- auto bias_operand = model.operands.append(shape, type);
+ auto input_operand = model.operands.emplace(shape, type);
+ auto kernel_operand = model.operands.emplace(shape, type);
+ auto bias_operand = model.operands.emplace(shape, type);
IndexSet inputs{input_operand, kernel_operand, bias_operand};
GraphNode::Param conv_params;
conv_params.stride.vertical = 1;
conv_params.activation = neurun::model::Activation::NONE;
- auto output_operand = model.operands.append(shape, type).value();
+ auto output_operand = model.operands.emplace(shape, type).value();
IndexSet outputs{output_operand};
auto conv = nnfw::cpp14::make_unique<GraphNode>(inputs, outputs, conv_params);
IndexSet inputs;
for (int i = 0; i < 6; ++i)
{
- inputs.append(model.operands.append(shape, type));
+ inputs.append(model.operands.emplace(shape, type));
}
GraphNode::Param concat_params;
- concat_params.axis_index = model.operands.append(shape, type);
+ concat_params.axis_index = model.operands.emplace(shape, type);
- auto output_operand = model.operands.append(shape, type).value();
+ auto output_operand = model.operands.emplace(shape, type).value();
IndexSet outputs{output_operand};
auto concat = nnfw::cpp14::make_unique<GraphNode>(inputs, outputs, concat_params);
::neurun::model::Shape shape{3};
::neurun::model::TypeInfo type{neurun::model::DataType::INT32};
- auto operand1 = model->operands.append(shape, type);
- auto operand2 = model->operands.append(shape, type);
+ auto operand1 = model->operands.emplace(shape, type);
+ auto operand2 = model->operands.emplace(shape, type);
model->inputs.append(operand1);
model->outputs.append(operand2);
Shape shape_scalar(0);
TypeInfo type_scalar{DataType::INT32};
- auto operand_lhs = model->operands.append(shape, type);
- auto operand_rhs = model->operands.append(shape, type);
- auto operand_result = model->operands.append(shape, type);
+ auto operand_lhs = model->operands.emplace(shape, type);
+ auto operand_rhs = model->operands.emplace(shape, type);
+ auto operand_result = model->operands.emplace(shape, type);
// Add operations
static int32_t rhs2_data[4] = {3, 1, -1, 5};
- auto operand_lhs = model->operands.append(shape, type);
- auto operand_rhs1 = model->operands.append(shape, type);
- auto operand_result1 = model->operands.append(shape, type);
- auto operand_rhs2 = model->operands.append(shape, type);
- auto operand_result2 = model->operands.append(shape, type);
+ auto operand_lhs = model->operands.emplace(shape, type);
+ auto operand_rhs1 = model->operands.emplace(shape, type);
+ auto operand_result1 = model->operands.emplace(shape, type);
+ auto operand_rhs2 = model->operands.emplace(shape, type);
+ auto operand_result2 = model->operands.emplace(shape, type);
model->operands.at(operand_rhs2)
.data(nnfw::cpp14::make_unique<CachedData>(reinterpret_cast<const uint8_t *>(&rhs2_data),
16));
Shape shape_scalar(0);
TypeInfo type_scalar{DataType::INT32};
- auto operand_lhs = model->operands.append(shape, type);
- auto operand_rhs = model->operands.append(shape, type);
+ auto operand_lhs = model->operands.emplace(shape, type);
+ auto operand_rhs = model->operands.emplace(shape, type);
- auto operand_activation = model->operands.append(shape_scalar, type_scalar);
+ auto operand_activation = model->operands.emplace(shape_scalar, type_scalar);
model->operands.at(operand_activation)
.data(nnfw::cpp14::make_unique<CachedData>(
reinterpret_cast<const uint8_t *>(&_activation_value), 4));
- auto operand_result = model->operands.append(shape, type);
+ auto operand_result = model->operands.emplace(shape, type);
// Add operations