* Move some files from the `model` directory into the `ir` directory.
* Move the contained symbols into the `neurun::ir` namespace, updating uses where possible (see the sketch below).
Signed-off-by: Sergei Barannikov <s.barannikov@samsung.com>
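For reviewers: the change is mechanical. Symbols previously under `neurun::model`
(headers under `model/`) now live in `neurun::ir` (headers under `ir/`). A minimal
sketch of a call site before and after, assuming only the renamed `Index` header
(illustrative only, not part of this patch):

    #include <ir/Index.h> // previously <model/Index.h>

    int main()
    {
      // Before this patch: neurun::model::IOIndex / neurun::model::OperandIndex
      neurun::ir::IOIndex io{0};
      neurun::ir::OperandIndex op{1};
      return (io.value() == 0 && op.value() == 1) ? 0 : 1;
    }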
{
try
{
- _execution->setInput(neurun::model::IOIndex(index), buffer, length);
+ _execution->setInput(neurun::ir::IOIndex(index), buffer, length);
}
catch (...)
{
{
try
{
- _execution->setOutput(neurun::model::IOIndex(index), buffer, length);
+ _execution->setOutput(neurun::ir::IOIndex(index), buffer, length);
}
catch (...)
{
std::cerr << "Error during nnfw_session::set_input_layout, not supported layout" << std::endl;
return NNFW_STATUS_ERROR;
}
- _execution->setInputLayout(neurun::model::IOIndex(index), convertLayout(layout));
+ _execution->setInputLayout(neurun::ir::IOIndex(index), convertLayout(layout));
}
catch (...)
{
<< std::endl;
return NNFW_STATUS_ERROR;
}
- _execution->setOutputLayout(neurun::model::IOIndex(index), convertLayout(layout));
+ _execution->setOutputLayout(neurun::ir::IOIndex(index), convertLayout(layout));
}
catch (...)
{
#include <memory>
#include <backend/Backend.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "Config.h"
#include "ConstantInitializer.h"
std::shared_ptr<IConfig> config() const override { return _config; }
std::unique_ptr<BackendContext>
- newContext(const model::Operands &operands,
+ newContext(const ir::Operands &operands,
const std::shared_ptr<custom::IKernelBuilder> &) const override
{
auto tensor_builder = std::make_shared<TensorBuilder>(createTensorManager());
namespace acl_cl
{
-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _operands{operands}, _tensor_builder{tensor_builder}
{
if (block_size_obj.isConstant())
{
- _init_map[block_size_index] = [](const model::Operand &model_obj,
- backend::operand::ITensor &obj) {
+ _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
const auto &shape = model_obj.shape();
const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
assert(model_obj.shape().rank() == 1);
if (block_size_obj.isConstant())
{
- _init_map[block_size_index] = [](const model::Operand &model_obj,
- backend::operand::ITensor &obj) {
+ _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
const auto &shape = model_obj.shape();
const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
assert(model_obj.shape().rank() == 1);
const auto &paddings_obj = _operands.at(paddings_index);
if (paddings_obj.isConstant())
{
- _init_map[paddings_index] = [](const model::Operand &model_obj,
- backend::operand::ITensor &obj) {
+ _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
const auto &shape = model_obj.shape();
const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
assert(model_obj.shape().rank() == 2);
#define __NEURUN_COMPILER_ACL_CL_CONSTANT_INITIALIZER_H__
#include <backend/IConstantInitializer.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "TensorBuilder.h"
namespace neurun
class ConstantInitializer : public IConstantInitializer
{
public:
- ConstantInitializer(const model::Operands &operands,
+ ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder);
public:
void visit(const model::operation::TransposeConv &) override;
private:
- const model::Operands &operands() const override { return _operands; }
+ const ir::Operands &operands() const override { return _operands; }
std::shared_ptr<ITensorBuilder> tensor_builder() const override { return _tensor_builder; }
private:
- const model::Operands &_operands;
+ const ir::Operands &_operands;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
#include <Swizzle.h>
#include "kernel/ConcatLayer.h"
-#include "model/Index.h"
+#include "ir/Index.h"
#include "ir/DataType.h"
#include "ir/InternalType.h"
#include "compiler/IExecutionBuilder.h"
//
// KernelGenerator
//
-KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx,
+KernelGenerator::KernelGenerator(const ir::Operands &ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN)
{
{
const auto ofm_index{node.getOutputs().at(0)};
- std::vector<model::OperandIndex> input_indexes;
+ std::vector<ir::OperandIndex> input_indexes;
for (const auto &input : node.getInputs())
input_indexes.emplace_back(input);
const auto output_rank = _ctx.at(output_index).shape().rank();
- std::vector<model::OperandIndex> input_indexes;
+ std::vector<ir::OperandIndex> input_indexes;
for (const auto &input_index : node.getInputs())
input_indexes.emplace_back(input_index);
assert(node.param().num_splits == static_cast<int>(node.getOutputs().size()));
const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
- std::vector<model::OperandIndex> output_indexes;
+ std::vector<ir::OperandIndex> output_indexes;
for (const auto &output : node.getOutputs())
output_indexes.emplace_back(output);
const auto input_rank = _ctx.at(input_index).shape().rank();
- std::vector<model::OperandIndex> output_indexes;
+ std::vector<ir::OperandIndex> output_indexes;
for (const auto &output_index : node.getOutputs())
output_indexes.emplace_back(output_index);
#include <backend/IKernelGenerator.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "TensorBuilder.h"
namespace neurun
class KernelGenerator : public IKernelGenerator
{
public:
- KernelGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
+ KernelGenerator(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder);
void visit(const model::Subgraph &) override;
void visit(const model::operation::BatchToSpaceND &) override;
void visit(const model::operation::Pad &) override;
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
ir::Layout _current_subg_layout;
};
#include <Swizzle.h>
#include "kernel/ConcatLayer.h"
-#include "model/Index.h"
+#include "ir/Index.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
#include "util/logging.h"
using ::neurun::backend::acl_common::asAclFunction;
-ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx,
+ShapeFixer::ShapeFixer(const ir::Operands &ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _ctx(ctx), _tensor_builder(tensor_builder)
{
#include <backend/IShapeFixer.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "TensorBuilder.h"
namespace neurun
class ShapeFixer : public IShapeFixer
{
public:
- ShapeFixer(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
+ ShapeFixer(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder);
std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
void visit(const model::operation::Pad &) override;
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
class TensorRegister : public acl_common::AclTensorRegister
{
public:
- TensorRegister(const model::Operands &operands,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
+ TensorRegister(const ir::Operands &operands, const std::shared_ptr<TensorBuilder> &tensor_builder)
: acl_common::AclTensorRegister{operands, tensor_builder}
{
// DO NOTHING
}
- void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const override
+ void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const override
{
nnfw::misc::polymorphic_downcast<TensorBuilder *>(tensor_builder().get())
->setUsesCount(ind, num_uses);
#include <cassert>
#include "AclMemoryManager.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
#include "util/logging.h"
namespace
_io_manager->clear();
}
- virtual void startLifetime(const model::OperandIndex &ind) override
+ virtual void startLifetime(const ir::OperandIndex &ind) override
{
auto &tensors = this->tensors();
assert(tensors.find(ind) != tensors.end());
_io_group->manage(tensor->handle());
}
- virtual void finishLifetime(const model::OperandIndex &ind) override
+ virtual void finishLifetime(const ir::OperandIndex &ind) override
{
auto &tensors = this->tensors();
assert(tensors.find(ind) != tensors.end());
#include <cassert>
#include "backend/IMemoryManager.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
#include "Convert.h"
#include "util/logging.h"
}
}
- virtual void startLifetime(const model::OperandIndex &) { /* DO NOTHING */}
- virtual void finishLifetime(const model::OperandIndex &) { /* DO NOTHING */}
+ virtual void startLifetime(const ir::OperandIndex &) { /* DO NOTHING */}
+ virtual void finishLifetime(const ir::OperandIndex &) { /* DO NOTHING */}
- void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
- size_t rank, size_t num_uses)
+ void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank,
+ size_t num_uses)
{
auto tensor = std::make_shared<T_Tensor>(info, rank, num_uses);
_tensors[ind] = tensor;
}
- void buildSubtensor(std::shared_ptr<T_ITensor> parent_tensor,
- const model::OperandIndex &child_ind, const ::arm_compute::TensorShape &shape,
+ void buildSubtensor(std::shared_ptr<T_ITensor> parent_tensor, const ir::OperandIndex &child_ind,
+ const ::arm_compute::TensorShape &shape,
const ::arm_compute::Coordinates &coordinates, size_t rank,
bool extent_parent)
{
_subtensors[child_ind] = subtensor;
}
- model::OperandIndexMap<std::shared_ptr<T_Tensor>> &tensors(void) { return _tensors; }
+ ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &tensors(void) { return _tensors; }
- model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &subtensors(void) { return _subtensors; }
+ ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &subtensors(void) { return _subtensors; }
private:
- model::OperandIndexMap<std::shared_ptr<T_Tensor>> _tensors;
- model::OperandIndexMap<std::shared_ptr<T_SubTensor>> _subtensors;
+ ir::OperandIndexMap<std::shared_ptr<T_Tensor>> _tensors;
+ ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> _subtensors;
};
} // namespace acl_common
#include "backend/ITensorManager.h"
#include "AclMemoryManager.h"
#include "AclInternalBufferManager.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
void allocateInternalBufferManager(void);
void deallocateInternalBufferManager(void);
- void buildTensor(const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info,
- size_t rank, bool as_const, size_t num_uses);
- void buildSubtensor(const model::OperandIndex &parent, const model::OperandIndex &child,
+ void buildTensor(const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank,
+ bool as_const, size_t num_uses);
+ void buildSubtensor(const ir::OperandIndex &parent, const ir::OperandIndex &child,
const ::arm_compute::TensorShape &shape,
const ::arm_compute::Coordinates &coordinates, size_t rank,
bool extent_parent);
- std::shared_ptr<T_ITensor> findTensorAsParent(const model::OperandIndex &ind);
+ std::shared_ptr<T_ITensor> findTensorAsParent(const ir::OperandIndex &ind);
- void startLifetime(const model::OperandIndex &ind);
- void finishLifetime(const model::OperandIndex &ind);
+ void startLifetime(const ir::OperandIndex &ind);
+ void finishLifetime(const ir::OperandIndex &ind);
- std::shared_ptr<T_ITensor> at(const ::neurun::model::OperandIndex &ind);
+ std::shared_ptr<T_ITensor> at(const ir::OperandIndex &ind);
- model::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
- model::OperandIndexMap<std::shared_ptr<T_Tensor>> &nonconstTensors(void);
- model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &nonconstSubtensors(void);
+ ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &constTensors(void);
+ ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &nonconstTensors(void);
+ ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &nonconstSubtensors(void);
std::shared_ptr<::arm_compute::IMemoryManager> internal_buffer_manager(void);
- void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+ void iterate(const std::function<void(const ir::OperandIndex &)> &fn);
void tryDeallocConstants(void);
std::unique_ptr<T_AclMemoryManager> _const_mgr;
std::unique_ptr<T_AclMemoryManager> _nonconst_mgr;
std::unique_ptr<IInternalBufferManager> _inter_mgr;
- model::OperandIndexMap<T_AclMemoryManager &> _ind_to_mgr;
+ ir::OperandIndexMap<T_AclMemoryManager &> _ind_to_mgr;
};
} // namespace acl_common
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::buildTensor(
- const model::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank,
- bool as_const, size_t num_uses)
+ const ir::OperandIndex &ind, const ::arm_compute::TensorInfo &info, size_t rank, bool as_const,
+ size_t num_uses)
{
assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
if (as_const)
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::buildSubtensor(
- const model::OperandIndex &parent, const model::OperandIndex &child,
+ const ir::OperandIndex &parent, const ir::OperandIndex &child,
const ::arm_compute::TensorShape &shape, const ::arm_compute::Coordinates &coordinates,
size_t rank, bool extent_parent)
{
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-std::shared_ptr<T_ITensor> AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::findTensorAsParent(
- const model::OperandIndex &ind)
+std::shared_ptr<T_ITensor>
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::findTensorAsParent(const ir::OperandIndex &ind)
{
auto &tensors = _nonconst_mgr->tensors();
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::startLifetime(
- const model::OperandIndex &ind)
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::startLifetime(const ir::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
_ind_to_mgr.at(ind).startLifetime(ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::finishLifetime(
- const model::OperandIndex &ind)
+void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::finishLifetime(const ir::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
_ind_to_mgr.at(ind).finishLifetime(ind);
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
std::shared_ptr<T_ITensor>
-AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::at(const ::neurun::model::OperandIndex &ind)
+AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::at(const ir::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &
AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::constTensors(void)
{
return _const_mgr->tensors();
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-model::OperandIndexMap<std::shared_ptr<T_Tensor>> &
+ir::OperandIndexMap<std::shared_ptr<T_Tensor>> &
AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::nonconstTensors(void)
{
return _nonconst_mgr->tensors();
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-model::OperandIndexMap<std::shared_ptr<T_SubTensor>> &
+ir::OperandIndexMap<std::shared_ptr<T_SubTensor>> &
AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::nonconstSubtensors(void)
{
return _nonconst_mgr->subtensors();
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void AclTensorManager<T_ITensor, T_Tensor, T_SubTensor>::iterate(
- const std::function<void(const model::OperandIndex &)> &fn)
+ const std::function<void(const ir::OperandIndex &)> &fn)
{
for (auto it : _nonconst_mgr->tensors())
fn(it.first);
namespace acl_common
{
-AclTensorRegister::AclTensorRegister(const model::Operands &operands,
+AclTensorRegister::AclTensorRegister(const ir::Operands &operands,
const std::shared_ptr<ITensorBuilder> &tensor_builder)
: _operands{operands}, _tensor_builder{tensor_builder}
{
class AclTensorRegister : public ITensorRegister
{
protected:
- AclTensorRegister(const model::Operands &operands,
+ AclTensorRegister(const ir::Operands &operands,
const std::shared_ptr<ITensorBuilder> &tensor_builder);
public:
protected:
void visit(const model::Subgraph &subgraph);
- virtual void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const = 0;
+ virtual void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const = 0;
protected:
- const model::Operands &operands() const override { return _operands; }
+ const ir::Operands &operands() const override { return _operands; }
std::shared_ptr<ITensorBuilder> tensor_builder() const override { return _tensor_builder; }
bool supportSubTensor() const final { return true; }
private:
- const model::Operands &_operands;
+ const ir::Operands &_operands;
const std::shared_ptr<ITensorBuilder> _tensor_builder;
};
#include "ir/Layout.h"
#include "ir/InternalType.h"
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "ir/Shape.h"
#include "ir/TypeInfo.h"
#include "misc/feature/Shape.h"
#include <arm_compute/core/Types.h>
#include <backend/ITensorBuilder.h>
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
#include "AclTensorManager.h"
#include "cpp14/memory.h"
#include <util/Utils.h>
* @param[in] info Tensor information
* @param[in] layout Tensor data layout
*/
- void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+ void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
ir::Layout backend_layout, bool as_const) override;
/**
* @brief Register subtensor information to allocate on ACL-CL backend
* @param[in] ind Operand index
* @param[in] info Tensor information
*/
- void registerSubTensorInfo(const model::OperandIndex &ind,
+ void registerSubTensorInfo(const ir::OperandIndex &ind,
const compiler::SubTensorInfo &info) override;
- void notifyFirstUse(const model::OperandIndex &) override;
- void notifyLastUse(const model::OperandIndex &) override;
+ void notifyFirstUse(const ir::OperandIndex &) override;
+ void notifyLastUse(const ir::OperandIndex &) override;
- bool isRegistered(const model::OperandIndex &) const override;
+ bool isRegistered(const ir::OperandIndex &) const override;
void prepare(void) override;
void allocateConsts() override;
void finalize() override;
std::shared_ptr<::neurun::backend::operand::ITensor>
- tensorAt(const model::OperandIndex &ind) override;
+ tensorAt(const ir::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
void preVisit(const model::Operation &node) override;
std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
- std::shared_ptr<T_ITensor> at(const ::neurun::model::OperandIndex &ind);
+ std::shared_ptr<T_ITensor> at(const ir::OperandIndex &ind);
/**
* @brief Check child tensor is allocated as subtensor of parent tensor
* @param[in] parent Index of parent
* @param[in] child Index of child
* @return @c true if child is allocated as subtensor of parent, otherwise @c false
*/
- bool isSubTensorOf(const model::OperandIndex &parent, const model::OperandIndex &child);
+ bool isSubTensorOf(const ir::OperandIndex &parent, const ir::OperandIndex &child);
- void dimCorrection(const model::OperandIndex &index, bool apply_dim_correction);
+ void dimCorrection(const ir::OperandIndex &index, bool apply_dim_correction);
T_AclTensorManager *acl_tensor_manager(void) { return _tensor_mgr.get(); }
- void setUsesCount(const model::OperandIndex &index, size_t num_uses)
+ void setUsesCount(const ir::OperandIndex &index, size_t num_uses)
{
assert(_uses_count_map.find(index) != _uses_count_map.end() ? _uses_count_map[index] == num_uses
: true);
void buildTensors(void);
void buildSubtensors(void);
void validate(void);
- model::OperandIndex findRootParent(model::OperandIndex index);
+ ir::OperandIndex findRootParent(ir::OperandIndex index);
private:
- model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
- model::OperandIndexMap<compiler::SubTensorInfo> _subtensor_info_map;
- model::OperandIndexMap<bool> _apply_dim_correction_map;
- model::OperandIndexMap<ir::Layout> _tensor_layout_map;
- model::OperandIndexMap<size_t> _uses_count_map;
+ ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
+ ir::OperandIndexMap<compiler::SubTensorInfo> _subtensor_info_map;
+ ir::OperandIndexMap<bool> _apply_dim_correction_map;
+ ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
+ ir::OperandIndexMap<size_t> _uses_count_map;
std::unique_ptr<T_AclTensorManager> _tensor_mgr;
- model::OperandIndexSequence _constants;
+ ir::OperandIndexSequence _constants;
// TODO Consider dividing TensorBuilder into Linear and others
const std::string _executor_str;
// for linear executor
- std::queue<std::pair<UsesType, model::OperandIndex>> _uses_queue;
+ std::queue<std::pair<UsesType, ir::OperandIndex>> _uses_queue;
uint32_t _first_uses_num;
- model::OperandIndexMap<bool> _first_uses_visit;
+ ir::OperandIndexMap<bool> _first_uses_visit;
// for subtensors
- model::OperandIndexMap<uint32_t> _parent_def;
- model::OperandIndexMap<uint32_t> _parent_uses;
+ ir::OperandIndexMap<uint32_t> _parent_def;
+ ir::OperandIndexMap<uint32_t> _parent_uses;
};
} // namespace acl_common
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::registerTensorInfo(
- const model::OperandIndex &ind, const model::OperandInfo &info, ir::Layout backend_layout,
+ const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout backend_layout,
bool as_const)
{
assert(_tensor_mgr->constTensors().size() == 0);
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::registerSubTensorInfo(
- const model::OperandIndex &ind, const compiler::SubTensorInfo &info)
+ const ir::OperandIndex &ind, const compiler::SubTensorInfo &info)
{
assert(_tensor_mgr->constTensors().size() == 0);
assert(_tensor_mgr->nonconstTensors().size() == 0);
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::notifyFirstUse(
- const model::OperandIndex &ind)
+ const ir::OperandIndex &ind)
{
_first_uses_num++;
_uses_queue.emplace(UsesType::FIRST, ind);
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::notifyLastUse(
- const model::OperandIndex &ind)
+ const ir::OperandIndex &ind)
{
_uses_queue.emplace(UsesType::LAST, ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
bool TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::isRegistered(
- const model::OperandIndex &ind) const
+ const ir::OperandIndex &ind) const
{
return _tensor_info_map.find(ind) != _tensor_info_map.end() ||
_subtensor_info_map.find(ind) != _subtensor_info_map.end();
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
std::shared_ptr<::neurun::backend::operand::ITensor>
-TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::tensorAt(const model::OperandIndex &ind)
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::tensorAt(const ir::OperandIndex &ind)
{
return _tensor_mgr->at(ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
std::shared_ptr<T_ITensor>
-TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::at(const ::neurun::model::OperandIndex &ind)
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::at(const ir::OperandIndex &ind)
{
return _tensor_mgr->at(ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
bool TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::isSubTensorOf(
- const model::OperandIndex &parent, const model::OperandIndex &child)
+ const ir::OperandIndex &parent, const ir::OperandIndex &child)
{
if (_subtensor_info_map.find(child) == _subtensor_info_map.end())
{
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::dimCorrection(
- const model::OperandIndex &index, bool apply_dim_correction)
+ const ir::OperandIndex &index, bool apply_dim_correction)
{
_apply_dim_correction_map[index] = apply_dim_correction;
}
auto &subtensors = _tensor_mgr->nonconstSubtensors();
for (auto &entry : _subtensor_info_map)
{
- model::OperandIndex ind = entry.first;
+ ir::OperandIndex ind = entry.first;
- std::stack<model::OperandIndex> stack;
+ std::stack<ir::OperandIndex> stack;
stack.push(ind);
while (!stack.empty())
return;
}
- std::function<void(const model::OperandIndex &ind)> def_handler =
- [this, &def_handler](const model::OperandIndex &ind) {
+ std::function<void(const ir::OperandIndex &ind)> def_handler =
+ [this, &def_handler](const ir::OperandIndex &ind) {
bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end();
bool is_parent = _parent_def.find(ind) != _parent_def.end();
if (!is_subtensor && !is_parent)
}
else if (is_subtensor)
{
- const model::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
+ const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
if (_parent_def[parent_ind] == 0)
return;
def_handler(parent_ind);
};
// See #5642
- model::OperandIndexMap<bool> outputs_map;
+ ir::OperandIndexMap<bool> outputs_map;
for (const auto &ind : node.getOutputs())
{
assert(_first_uses_visit.find(ind) != _first_uses_visit.end());
// outputs_map's all elements are true?
auto outputs_map_all_check = [&outputs_map]() {
return std::all_of(outputs_map.begin(), outputs_map.end(),
- [](std::pair<const model::OperandIndex, bool> it) { return it.second; });
+ [](std::pair<const ir::OperandIndex, bool> it) { return it.second; });
};
- std::pair<UsesType, model::OperandIndex> peak;
+ std::pair<UsesType, ir::OperandIndex> peak;
while (!outputs_map_all_check() && (peak = _uses_queue.front()).first == UsesType::FIRST)
{
_uses_queue.pop();
return;
}
- std::function<void(const model::OperandIndex &ind)> use_handler =
- [this, &use_handler](const model::OperandIndex &ind) {
+ std::function<void(const ir::OperandIndex &ind)> use_handler =
+ [this, &use_handler](const ir::OperandIndex &ind) {
bool is_subtensor = _subtensor_info_map.find(ind) != _subtensor_info_map.end();
bool is_parent = _parent_uses.find(ind) != _parent_uses.end();
if (!is_subtensor && !is_parent)
}
else if (is_subtensor)
{
- const model::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
+ const ir::OperandIndex &parent_ind = _subtensor_info_map.at(ind).parent();
--_parent_uses[parent_ind];
assert(_parent_uses[parent_ind] > 0);
}
// See #5642
const auto &inputs = node.getInputs();
- std::pair<UsesType, model::OperandIndex> peak;
+ std::pair<UsesType, ir::OperandIndex> peak;
while ((peak = _uses_queue.front()).first == UsesType::LAST)
{
const auto &popped_idx = peak.second;
assert(_uses_queue.size() == 0);
assert(_first_uses_num == 0);
- assert(std::all_of(
- _parent_def.begin(), _parent_def.end(),
- [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+ assert(
+ std::all_of(_parent_def.begin(), _parent_def.end(),
+ [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
- assert(std::all_of(
- _parent_uses.begin(), _parent_uses.end(),
- [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+ assert(
+ std::all_of(_parent_uses.begin(), _parent_uses.end(),
+ [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor>
-model::OperandIndex
-TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::findRootParent(model::OperandIndex ind)
+ir::OperandIndex
+TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor>::findRootParent(ir::OperandIndex ind)
{
if (_subtensor_info_map.find(ind) == _subtensor_info_map.end())
return ind;
#include <memory>
#include <backend/Backend.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "Config.h"
#include "ConstantInitializer.h"
std::shared_ptr<IConfig> config() const override { return _config; }
std::unique_ptr<BackendContext>
- newContext(const model::Operands &operands,
+ newContext(const ir::Operands &operands,
const std::shared_ptr<custom::IKernelBuilder> &) const override
{
auto tensor_builder = std::make_shared<TensorBuilder>(createTensorManager());
namespace acl_neon
{
-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _operands{operands}, _tensor_builder{tensor_builder}
{
if (block_size_obj.isConstant())
{
- _init_map[block_size_index] = [](const model::Operand &model_obj,
- backend::operand::ITensor &obj) {
+ _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
const auto &shape = model_obj.shape();
const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
assert(model_obj.shape().rank() == 1);
if (block_size_obj.isConstant())
{
- _init_map[block_size_index] = [](const model::Operand &model_obj,
- backend::operand::ITensor &obj) {
+ _init_map[block_size_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
const auto &shape = model_obj.shape();
const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
assert(model_obj.shape().rank() == 1);
const auto &paddings_obj = _operands.at(paddings_index);
if (paddings_obj.isConstant())
{
- _init_map[paddings_index] = [](const model::Operand &model_obj,
- backend::operand::ITensor &obj) {
+ _init_map[paddings_index] = [](const ir::Operand &model_obj, backend::operand::ITensor &obj) {
const auto &shape = model_obj.shape();
const auto base = reinterpret_cast<const int32_t *>(model_obj.data().base());
assert(model_obj.shape().rank() == 2);
#define __NEURUN_COMPILER_ACL_NEON_CONSTANT_INITIALIZER_H__
#include <backend/IConstantInitializer.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "TensorBuilder.h"
namespace neurun
class ConstantInitializer : public IConstantInitializer
{
public:
- ConstantInitializer(const model::Operands &operands,
+ ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder);
public:
void visit(const model::operation::TransposeConv &) override;
private:
- const model::Operands &operands() const override { return _operands; }
+ const ir::Operands &operands() const override { return _operands; }
std::shared_ptr<ITensorBuilder> tensor_builder() const override { return _tensor_builder; }
private:
- const model::Operands &_operands;
+ const ir::Operands &_operands;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
#include "kernel/ConcatLayer.h"
#include "util/Padding.h"
-#include "model/Index.h"
+#include "ir/Index.h"
#include "ir/DataType.h"
#include "ir/InternalType.h"
#include "compiler/IExecutionBuilder.h"
//
// KernelGenerator
//
-KernelGenerator::KernelGenerator(const neurun::model::Operands &ctx,
+KernelGenerator::KernelGenerator(const ir::Operands &ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _ctx(ctx), _tensor_builder(tensor_builder), _current_subg_layout(ir::Layout::UNKNOWN)
{
{
const auto ofm_index{node.getOutputs().at(0)};
- std::vector<model::OperandIndex> input_indexes;
+ std::vector<ir::OperandIndex> input_indexes;
for (const auto &input : node.getInputs())
input_indexes.emplace_back(input);
const auto output_rank = _ctx.at(output_index).shape().rank();
- std::vector<model::OperandIndex> input_indexes;
+ std::vector<ir::OperandIndex> input_indexes;
for (const auto &input_index : node.getInputs())
input_indexes.emplace_back(input_index);
assert(node.param().num_splits == static_cast<int>(node.getOutputs().size()));
const auto ifm_rank = _ctx.at(ifm_index).shape().rank();
- std::vector<model::OperandIndex> output_indexes;
+ std::vector<ir::OperandIndex> output_indexes;
for (const auto &output : node.getOutputs())
output_indexes.emplace_back(output);
const auto input_rank = _ctx.at(input_index).shape().rank();
- std::vector<model::OperandIndex> output_indexes;
+ std::vector<ir::OperandIndex> output_indexes;
for (const auto &output_index : node.getOutputs())
output_indexes.emplace_back(output_index);
#include <backend/IKernelGenerator.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "TensorBuilder.h"
namespace neurun
class KernelGenerator : public IKernelGenerator
{
public:
- KernelGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
+ KernelGenerator(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder);
void visit(const model::Subgraph &) override;
void visit(const model::operation::Abs &) override;
void visit(const model::operation::Comparison &) override;
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
ir::Layout _current_subg_layout;
};
#include "kernel/ConcatLayer.h"
#include "util/Padding.h"
-#include "model/Index.h"
+#include "ir/Index.h"
#include "compiler/IExecutionBuilder.h"
#include "exec/NopFunction.h"
#include "util/logging.h"
using ::neurun::backend::acl_common::asAclFunction;
-ShapeFixer::ShapeFixer(const neurun::model::Operands &ctx,
+ShapeFixer::ShapeFixer(const ir::Operands &ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _ctx(ctx), _tensor_builder(tensor_builder)
{
#include <backend/IShapeFixer.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "TensorBuilder.h"
namespace neurun
class ShapeFixer : public IShapeFixer
{
public:
- ShapeFixer(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
+ ShapeFixer(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder);
std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
void visit(const model::operation::Comparison &) override;
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
class TensorRegister : public acl_common::AclTensorRegister
{
public:
- TensorRegister(const model::Operands &operands,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
+ TensorRegister(const ir::Operands &operands, const std::shared_ptr<TensorBuilder> &tensor_builder)
: acl_common::AclTensorRegister{operands, tensor_builder}
{
// DO NOTHING
}
- void setUsesCount(const model::OperandIndex &ind, size_t num_uses) const override
+ void setUsesCount(const ir::OperandIndex &ind, size_t num_uses) const override
{
nnfw::misc::polymorphic_downcast<TensorBuilder *>(tensor_builder().get())
->setUsesCount(ind, num_uses);
#include <memory>
#include <backend/Backend.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "Config.h"
#include "ConstantInitializer.h"
std::shared_ptr<IConfig> config() const override { return _config; }
std::unique_ptr<BackendContext>
- newContext(const model::Operands &operands,
+ newContext(const ir::Operands &operands,
const std::shared_ptr<custom::IKernelBuilder> &kb) const override
{
auto tensor_builder = std::make_shared<TensorBuilder>();
namespace cpu
{
-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _operands{operands}, _tensor_builder{tensor_builder}
{
#define __NEURUN_COMPILER_CPU_CONSTANT_INITIALIZER_H__
#include <backend/IConstantInitializer.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "TensorBuilder.h"
namespace neurun
class ConstantInitializer : public IConstantInitializer
{
public:
- ConstantInitializer(const model::Operands &operands,
+ ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder);
public:
void visit(const model::operation::FullyConnected &) override;
private:
- const model::Operands &operands() const override { return _operands; }
+ const ir::Operands &operands() const override { return _operands; }
std::shared_ptr<ITensorBuilder> tensor_builder() const override { return _tensor_builder; }
private:
- const model::Operands &_operands;
+ const ir::Operands &_operands;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
{
KernelGenerator::KernelGenerator(
- const neurun::model::Operands &operand_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
+ const ir::Operands &operand_ctx, const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<backend::custom::IKernelBuilder> &kernel_builer)
: _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kernel_builer),
_current_subg_layout(ir::Layout::UNKNOWN)
void KernelGenerator::visit(const model::operation::Custom &node)
{
- auto get_type_info = [this](const model::Operand &operand) -> custom::TypeInfo {
+ auto get_type_info = [this](const ir::Operand &operand) -> custom::TypeInfo {
auto backendDescr =
::neurun::backend::cpu::kernel::getTensorDescriptor(operand, _current_subg_layout);
return {shape, backendDescr.type};
};
- auto fill_op_info = [&](const model::OperandIndexSequence &opSeq,
+ auto fill_op_info = [&](const ir::OperandIndexSequence &opSeq,
std::vector<custom::TypeInfo> &types, std::vector<void *> &allocs) {
for (auto &idx : opSeq)
{
#define __NEURUN_BACKEND_CPU_KERNEL_GENERATOR_H__
#include "backend/IKernelGenerator.h"
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "operand/Tensor.h"
#include "backend/CustomKernelBuilder.h"
#include "TensorBuilder.h"
class KernelGenerator : public IKernelGenerator
{
public:
- KernelGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
+ KernelGenerator(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<custom::IKernelBuilder> &kernel_builder);
using IKernelGenerator::visit;
void visit(const model::operation::Pad &);
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
std::shared_ptr<backend::custom::IKernelBuilder> _kernel_builder;
ir::Layout _current_subg_layout;
return MemoryPlannerFactory::get().create(planner_id);
}
-void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info)
+void MemoryManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info)
{
auto tensor = std::make_shared<operand::Tensor>(info);
_tensors[ind] = tensor;
}
-void MemoryManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
{
_mem_planner->claim(ind, size);
}
-void MemoryManager::releasePlan(const model::OperandIndex &ind) { _mem_planner->release(ind); }
+void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); }
void MemoryManager::allocate(void)
{
#include "backend/IMemoryManager.h"
#include "MemoryPlanner.h"
#include "operand/Tensor.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
void allocate(void) override;
void deallocate(void) override { _mem_alloc->release(); }
- void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info);
- void claimPlan(const model::OperandIndex &ind, uint32_t size);
- void releasePlan(const model::OperandIndex &ind);
+ void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info);
+ void claimPlan(const ir::OperandIndex &ind, uint32_t size);
+ void releasePlan(const ir::OperandIndex &ind);
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
private:
IMemoryPlanner *createMemoryPlanner();
private:
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
- model::OperandIndexMap<Block> _tensor_mem_map;
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
+ ir::OperandIndexMap<Block> _tensor_mem_map;
std::shared_ptr<IMemoryPlanner> _mem_planner;
std::shared_ptr<Allocator> _mem_alloc;
};
VERBOSE(ALLOC) << "base pointer: " << static_cast<void *>(_base.get()) << std::endl;
}
-void BumpPlanner::claim(const model::OperandIndex &ind, size_t size)
+void BumpPlanner::claim(const ir::OperandIndex &ind, size_t size)
{
assert(size != 0);
<< std::endl;
}
-void BumpPlanner::release(const model::OperandIndex &ind)
+void BumpPlanner::release(const ir::OperandIndex &ind)
{
VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): "
<< "NOTHING does" << std::endl;
// There are some assumptions for claiming memory(== making a reservation for memory).
// 1. About _claim_table(std::map).
// - The table's data structure is std::map so that it always sorts
-// value(model::OperandIndex) by key(base_offset).
+// value(ir::OperandIndex) by key(base_offset).
// - This claim() inserts key/value into _claim_table and the release() removes the key/value from
// _claim_table.
// - _claim_table shows the memory status at a certain point in time. Therefore,
// point in time, it means the place at the offset can be claimed.
// 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than
// the previous claim_base_offset.
-void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size)
+void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size)
{
assert(size != 0);
}
}
-void FirstFitPlanner::release(const model::OperandIndex &ind)
+void FirstFitPlanner::release(const ir::OperandIndex &ind)
{
for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it)
{
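As a reading aid, here is a standalone sketch of the first-fit scan described by
the comment above. It is simplified: the real planner tracks sizes in a separate
plan map, and the names below (`firstFitClaim`, `claim_table`, `Index`) are
illustrative stand-ins, not the actual FirstFitPlanner members.

    #include <cstdint>
    #include <map>
    #include <utility>

    using Index = uint32_t; // stand-in for ir::OperandIndex

    // Walk reservations sorted by base offset; the first gap wide enough for
    // `size` wins. Claimed regions are [offset, offset + claimed_size).
    uint32_t firstFitClaim(std::map<uint32_t, std::pair<Index, uint32_t>> &claim_table,
                           Index ind, uint32_t size)
    {
      uint32_t next_offset = 0;
      for (const auto &entry : claim_table)
      {
        const uint32_t claimed_base = entry.first;
        const uint32_t claimed_size = entry.second.second;
        if (next_offset + size <= claimed_base)
          break; // the gap before this reservation is big enough
        next_offset = claimed_base + claimed_size;
      }
      claim_table.emplace(next_offset, std::make_pair(ind, size));
      return next_offset;
    }

Since release() removes an entry rather than compacting, a later claim can reuse
the freed gap on its next scan, which matches the linear search over _claim_table
in release() above.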
#include <map>
#include <cpp14/memory.h>
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
*/
struct IMemoryPlanner
{
- using MemoryPlans = model::OperandIndexMap<Block>;
+ using MemoryPlans = ir::OperandIndexMap<Block>;
/**
* @brief Claim memory for operand
* @param[in] index The operand index
* @param[in] size The size of the memory
*/
- virtual void claim(const model::OperandIndex &, size_t) = 0;
+ virtual void claim(const ir::OperandIndex &, size_t) = 0;
/**
* @brief Release memory for operand
* @param[in] index The operand index
*/
- virtual void release(const model::OperandIndex &) = 0;
+ virtual void release(const ir::OperandIndex &) = 0;
/**
* @brief Get capacity for memory planning
* @return The value of capacity
* @param[in] index The operand index
* @param[in] size The size of the memory
*/
- void claim(const model::OperandIndex &, size_t) override;
+ void claim(const ir::OperandIndex &, size_t) override;
/**
* @brief Release memory for operand by bump way
* @param[in] index The operand index
*/
- void release(const model::OperandIndex &) override;
+ void release(const ir::OperandIndex &) override;
/**
* @brief Get capacity for memory planning
* @return The value of capacity
* @param[in] index The operand index
* @param[in] size The size of the memory
*/
- void claim(const model::OperandIndex &, size_t) override;
+ void claim(const ir::OperandIndex &, size_t) override;
/**
* @brief Release memory for operand by firstfit way
* @param[in] index The operand index
*/
- void release(const model::OperandIndex &) override;
+ void release(const ir::OperandIndex &) override;
/**
* @brief Get capacity for memory planning
* @return The value of capacity
uint32_t _capacity = 0;
MemoryPlans _mem_plans;
// Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset)
- std::map<uint32_t, model::OperandIndex> _claim_table;
+ std::map<uint32_t, ir::OperandIndex> _claim_table;
};
} // namespace cpu
#include <gtest/gtest.h>
#include "MemoryPlanner.h"
-#include "model/Index.h"
+#include "ir/Index.h"
TEST(Allocator, allocate_test)
{
::neurun::backend::cpu::BumpPlanner planner;
auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) {
- ::neurun::model::OperandIndex mem_idx(index);
+    ::neurun::ir::OperandIndex mem_idx(index);
planner.claim(mem_idx, size);
auto mem_blk = planner.memory_plans()[mem_idx];
ASSERT_EQ(mem_blk.offset, expected_offset);
::neurun::backend::cpu::FirstFitPlanner planner;
auto claim = [&planner](uint32_t index, size_t size, uint32_t expected_offset) {
- ::neurun::model::OperandIndex mem_idx(index);
+    ::neurun::ir::OperandIndex mem_idx(index);
planner.claim(mem_idx, size);
auto mem_blk = planner.memory_plans()[mem_idx];
ASSERT_EQ(mem_blk.offset, expected_offset);
};
auto release = [&planner](uint32_t index) {
- ::neurun::model::OperandIndex mem_idx(index);
+    ::neurun::ir::OperandIndex mem_idx(index);
planner.release(mem_idx);
};
namespace cpu
{
-ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx,
+ShapeFixer::ShapeFixer(const ir::Operands &operand_ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _ctx(operand_ctx), _tensor_builder(tensor_builder)
{
#include <backend/IShapeFixer.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "operand/Tensor.h"
#include "TensorBuilder.h"
class ShapeFixer : public IShapeFixer
{
public:
- ShapeFixer(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
+ ShapeFixer(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder);
std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
void visit(const model::operation::Pad &);
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
// DO NOTHING
}
-void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
- const model::OperandInfo &info, ir::Layout, bool as_const)
+void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
+ ir::Layout, bool as_const)
{
_tensor_info_map.emplace(ind, info);
_constants.append(ind);
}
-void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
- const compiler::SubTensorInfo &)
+void TensorBuilder::registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &)
{
// Not supported yet
assert(false);
}
-void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind)
+void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
{
assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
const auto tensor_info = _tensor_info_map.at(ind);
_tensor_mgr->claimPlan(ind, size);
}
-void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
+void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
-bool TensorBuilder::isRegistered(const model::OperandIndex &ind) const
+bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const
{
return _tensor_info_map.find(ind) != _tensor_info_map.end();
}
}
std::shared_ptr<::neurun::backend::operand::ITensor>
-TensorBuilder::tensorAt(const model::OperandIndex &ind)
+TensorBuilder::tensorAt(const ir::OperandIndex &ind)
{
return _tensor_mgr->at(ind);
}
void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
-std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
+std::shared_ptr<operand::Tensor> TensorBuilder::at(const ir::OperandIndex &ind)
{
return _tensor_mgr->at(ind);
}
#include <backend/ITensorBuilder.h>
#include "operand/Tensor.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
#include "TensorManager.h"
namespace neurun
* @param[in] info Operand information
* @param[in] layout Operand data layout
*/
- void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+ void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
ir::Layout backend_layout, bool as_const) override;
/**
* @brief Register subtensor information to allocate on CPU backend
* @param[in] ind Operand index
* @param[in] info Tensor information
*/
- void registerSubTensorInfo(const model::OperandIndex &ind,
+ void registerSubTensorInfo(const ir::OperandIndex &ind,
const compiler::SubTensorInfo &info) override;
- void notifyFirstUse(const model::OperandIndex &) override;
- void notifyLastUse(const model::OperandIndex &) override;
+ void notifyFirstUse(const ir::OperandIndex &) override;
+ void notifyLastUse(const ir::OperandIndex &) override;
- bool isRegistered(const model::OperandIndex &) const override;
+ bool isRegistered(const ir::OperandIndex &) const override;
void prepare(void) override;
void allocateConsts() override;
void finalize() override { /* DO NOTHING */}
std::shared_ptr<::neurun::backend::operand::ITensor>
- tensorAt(const model::OperandIndex &ind) override;
+ tensorAt(const ir::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
- std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+ std::shared_ptr<operand::Tensor> at(const ir::OperandIndex &ind);
private:
std::unique_ptr<TensorManager> _tensor_mgr;
- model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
- model::OperandIndexSequence _constants;
+ ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
+ ir::OperandIndexSequence _constants;
};
} // namespace cpu
void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
-void TensorManager::buildTensor(const model::OperandIndex &ind,
- const model::OperandInfo &tensor_info, bool as_const)
+void TensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info,
+ bool as_const)
{
assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
if (as_const)
}
}
-void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+void TensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
_ind_to_mgr.at(ind).claimPlan(ind, size);
}
-void TensorManager::releasePlan(const model::OperandIndex &ind)
+void TensorManager::releasePlan(const ir::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
_ind_to_mgr.at(ind).releasePlan(ind);
}
-std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+std::shared_ptr<operand::Tensor> TensorManager::at(const ir::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
return _ind_to_mgr.at(ind).tensors().at(ind);
}
-model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
{
return _const_mgr->tensors();
}
-model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
{
return _nonconst_mgr->tensors();
}
-void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+void TensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
{
for (auto it : _nonconst_mgr->tensors())
fn(it.first);
#include "backend/ITensorManager.h"
#include "MemoryManager.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
void deallocateConsts(void) override;
void deallocateNonconsts(void) override;
- void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info,
- bool as_const);
+ void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info, bool as_const);
- void claimPlan(const model::OperandIndex &ind, uint32_t size);
- void releasePlan(const model::OperandIndex &ind);
+ void claimPlan(const ir::OperandIndex &ind, uint32_t size);
+ void releasePlan(const ir::OperandIndex &ind);
- std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+ std::shared_ptr<operand::Tensor> at(const ir::OperandIndex &ind);
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
- void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+ void iterate(const std::function<void(const ir::OperandIndex &)> &fn);
private:
std::unique_ptr<MemoryManager> _const_mgr;
std::unique_ptr<MemoryManager> _nonconst_mgr;
- model::OperandIndexMap<MemoryManager &> _ind_to_mgr;
+ ir::OperandIndexMap<MemoryManager &> _ind_to_mgr;
};
} // namespace cpu
namespace cpu
{
-TensorRegister::TensorRegister(const model::Operands &operands,
+TensorRegister::TensorRegister(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _operands{operands}, _tensor_builder{tensor_builder}
{
class TensorRegister : public ITensorRegister
{
public:
- TensorRegister(const model::Operands &operands,
+ TensorRegister(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder);
private:
- const model::Operands &operands() const override { return _operands; }
+ const ir::Operands &operands() const override { return _operands; }
std::shared_ptr<ITensorBuilder> tensor_builder() const override { return _tensor_builder; }
bool supportSubTensor() const final { return false; }
private:
- const model::Operands &_operands;
+ const ir::Operands &_operands;
const std::shared_ptr<TensorBuilder> _tensor_builder;
};
return static_cast<int32_t>(std::floor(max_input_rescaled));
}
-TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout)
+TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout)
{
TensorDescriptor descriptor;
#include <cker/Shape.h>
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "ir/DataType.h"
#include <ir/InternalType.h>
int32_t CalculateInputRadius(int input_integer_bits, int input_left_shift);
-TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout);
+TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout);
uint32_t sizeOfData(OperandType type, const std::vector<uint32_t> &dimensions);
#define __NEURUN_BACKEND_CPU_OPERAND_TENSOR_H__
#include <backend/operand/ITensor.h>
-#include "model/OperandInfo.h"
+#include "ir/OperandInfo.h"
namespace neurun
{
Tensor() = delete;
public:
- Tensor(const model::OperandInfo &info) : _info(info)
+ Tensor(const ir::OperandInfo &info) : _info(info)
{
// DO NOTHING
}
void access(const std::function<void(ITensor &tensor)> &fn) final;
private:
- model::OperandInfo _info;
+ ir::OperandInfo _info;
uint8_t *_buffer = nullptr;
};
#include <backend/IKernelGenerator.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "TensorBuilder.h"
namespace neurun
class KernelGenerator : public IKernelGenerator
{
public:
- KernelGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
+  KernelGenerator(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder);
// TODO add more ops
private:
- const neurun::model::Operands &_ctx;
+  const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
#include <unordered_map>
#include <backend/ITensorBuilder.h>
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
#include <memory>
#include <backend/Backend.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "Config.h"
#include "ConstantInitializer.h"
std::shared_ptr<IConfig> config() const override { return _config; }
std::unique_ptr<BackendContext>
- newContext(const model::Operands &operands,
+ newContext(const ir::Operands &operands,
const std::shared_ptr<custom::IKernelBuilder> &kb) const override
{
auto tensor_builder = std::make_shared<TensorBuilder>();
{
template <typename T>
-static void PermuteKernel(const neurun::model::Operand &model_obj,
+static void PermuteKernel(const neurun::ir::Operand &model_obj,
neurun::backend::operand::ITensor &obj,
const std::vector<int32_t> &permutation)
{
namespace srcn
{
-ConstantInitializer::ConstantInitializer(const model::Operands &operands,
+ConstantInitializer::ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _operands{operands}, _tensor_builder{tensor_builder}
{
// DO NOTHING
}
-void ConstantInitializer::registerPermuteKernelInitializer(const model::OperandIndex &index,
- const model::Operand &obj,
+void ConstantInitializer::registerPermuteKernelInitializer(const ir::OperandIndex &index,
+ const ir::Operand &obj,
const std::vector<int32_t> &permutation)
{
// For only CONSTANTS
#define __NEURUN_COMPILER_SRCN_CONSTANT_INITIALIZER_H__
#include <backend/IConstantInitializer.h>
-#include <model/Operands.h>
+#include <ir/Operands.h>
#include "TensorBuilder.h"
#include <util/Coordinates.h>
class ConstantInitializer : public IConstantInitializer
{
public:
- ConstantInitializer(const model::Operands &operands,
+ ConstantInitializer(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder);
public:
- void registerPermuteKernelInitializer(const model::OperandIndex &index, const model::Operand &obj,
+ void registerPermuteKernelInitializer(const ir::OperandIndex &index, const ir::Operand &obj,
const std::vector<int32_t> &permutation);
public:
void visit(const model::operation::TransposeConv &) override;
private:
- const model::Operands &operands() const override { return _operands; }
+ const ir::Operands &operands() const override { return _operands; }
std::shared_ptr<ITensorBuilder> tensor_builder() const override { return _tensor_builder; }
private:
- const model::Operands &_operands;
+ const ir::Operands &_operands;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
return ret;
}
-model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
- ir::Layout frontend_layout, ir::Layout backend_layout)
+ir::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
+ ir::Layout frontend_layout, ir::Layout backend_layout)
{
- model::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo);
+ ir::OperandInfo info(asTensorShape(shape, frontend_layout, backend_layout), typeInfo);
return info;
}
#include <ir/Layout.h>
#include <ir/Shape.h>
#include <ir/TypeInfo.h>
-#include <model/OperandInfo.h>
+#include <ir/OperandInfo.h>
namespace neurun
{
ir::Shape asTensorShape(const ir::Shape &shape, ir::Layout frontend_layout,
ir::Layout backend_layout);
-model::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
- ir::Layout frontend_layout, ir::Layout backend_layout);
+ir::OperandInfo asTensorInfo(const ir::Shape &shape, const ir::TypeInfo &typeInfo,
+ ir::Layout frontend_layout, ir::Layout backend_layout);
} // namespace srcn
} // namespace backend
namespace srcn
{
-KernelGenerator::KernelGenerator(const neurun::model::Operands &operand_ctx,
+KernelGenerator::KernelGenerator(const ir::Operands &operand_ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<custom::IKernelBuilder> &kb)
: _ctx(operand_ctx), _tensor_builder(tensor_builder), _kernel_builder(kb),
#define __NEURUN_BACKEND_SRCN_KERNEL_GENERATOR_H__
#include "backend/IKernelGenerator.h"
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "operand/Tensor.h"
#include "backend/CustomKernelBuilder.h"
#include "TensorBuilder.h"
class KernelGenerator : public IKernelGenerator
{
public:
- KernelGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
+ KernelGenerator(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<custom::IKernelBuilder> &kb);
using IKernelGenerator::visit;
void visit(const model::operation::Add &) override;
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
std::shared_ptr<custom::IKernelBuilder> _kernel_builder;
ir::Layout _current_subg_layout;
return MemoryPlannerFactory::get().create(planner_id);
}
-void MemoryManager::buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info,
+void MemoryManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info,
ir::Layout layout)
{
auto tensor = std::make_shared<operand::Tensor>(info, layout);
_tensors[ind] = tensor;
}
-void MemoryManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+void MemoryManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
{
_mem_planner->claim(ind, size);
}
-void MemoryManager::releasePlan(const model::OperandIndex &ind) { _mem_planner->release(ind); }
+void MemoryManager::releasePlan(const ir::OperandIndex &ind) { _mem_planner->release(ind); }
void MemoryManager::allocate(void)
{
#include "backend/IMemoryManager.h"
#include "MemoryPlanner.h"
#include "operand/Tensor.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
void allocate(void) override;
void deallocate(void) override { _mem_alloc->release(); }
- void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &info,
- ir::Layout layout);
- void claimPlan(const model::OperandIndex &ind, uint32_t size);
- void releasePlan(const model::OperandIndex &ind);
+ void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &info, ir::Layout layout);
+ void claimPlan(const ir::OperandIndex &ind, uint32_t size);
+ void releasePlan(const ir::OperandIndex &ind);
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &tensors(void) { return _tensors; }
private:
IMemoryPlanner *createMemoryPlanner();
private:
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
- model::OperandIndexMap<Block> _tensor_mem_map;
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> _tensors;
+ ir::OperandIndexMap<Block> _tensor_mem_map;
std::shared_ptr<IMemoryPlanner> _mem_planner;
std::shared_ptr<Allocator> _mem_alloc;
};
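
Reviewer note: the MemoryManager members renamed above are called in a fixed order by the srcn backend. A minimal sketch of that lifecycle, assuming `mm` is a constructed MemoryManager, `info` a valid ir::OperandInfo, and `size` its byte size (the index value is hypothetical):

    // Sketch only; mirrors the declarations above, not actual executor code.
    neurun::ir::OperandIndex ind{0};                      // hypothetical index
    mm.buildTensor(ind, info, neurun::ir::Layout::NHWC);  // wrap info in a Tensor
    mm.claimPlan(ind, size);                              // reserve with the planner
    // ... interleave claims/releases for the other operands ...
    mm.releasePlan(ind);                                  // operand's last use has passed
    mm.allocate();  // one real allocation; planned offsets become buffer addresses
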
VERBOSE(ALLOC) << "base pointer: " << static_cast<void *>(_base.get()) << std::endl;
}
-void BumpPlanner::claim(const model::OperandIndex &ind, size_t size)
+void BumpPlanner::claim(const ir::OperandIndex &ind, size_t size)
{
assert(size != 0);
<< std::endl;
}
-void BumpPlanner::release(const model::OperandIndex &ind)
+void BumpPlanner::release(const ir::OperandIndex &ind)
{
VERBOSE(BP_PLANNER) << "RELEASE(#" << ind.value() << "): "
<< "NOTHING does" << std::endl;
// There are some assumptions for claiming memory(== making a reservation for memory).
// 1. About _claim_table(std::map).
// - The table's data structure is std::map so that it always sorts
-// value(model::OperandIndex) by key(base_offset).
+// value(OperandIndex) by key(base_offset).
// - This claim() inserts key/value into _claim_table and the release() removes the key/value from
// _claim_table.
// - _claim_table shows the memory status at a certain point in time. Therefore,
// point in time, it means the place at the offset can be claimed.
// 2. In the loop for _claim_table, we can assume the current claim_base_offset value is bigger than
// the previous claim_base_offset.
-void FirstFitPlanner::claim(const model::OperandIndex &ind, size_t size)
+void FirstFitPlanner::claim(const ir::OperandIndex &ind, size_t size)
{
assert(size != 0);
}
}
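
The assumptions spelled out in the comment block above reduce to a single scan. A self-contained illustration of that first-fit walk (the real claim() body is elided from this hunk; here the map carries sizes directly, whereas _claim_table maps base offsets to ir::OperandIndex):

    #include <cstdint>
    #include <map>

    // key = base_offset, value = claimed size; entries are non-overlapping
    // and std::map keeps them sorted by offset, as the comments above require.
    uint32_t firstFitOffset(const std::map<uint32_t, uint32_t> &claims, uint32_t size)
    {
      uint32_t next_offset = 0;
      for (const auto &c : claims)
      {
        if (c.first - next_offset >= size)
          break;                          // the gap before this claim is large enough
        next_offset = c.first + c.second; // otherwise skip past the claimed region
      }
      return next_offset; // also correct for an empty table or an append at the end
    }
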
-void FirstFitPlanner::release(const model::OperandIndex &ind)
+void FirstFitPlanner::release(const ir::OperandIndex &ind)
{
for (auto it = _claim_table.cbegin(); it != _claim_table.cend(); ++it)
{
#include <map>
#include <cpp14/memory.h>
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
*/
struct IMemoryPlanner
{
- using MemoryPlans = model::OperandIndexMap<Block>;
+ using MemoryPlans = ir::OperandIndexMap<Block>;
/**
* @brief Claim memory for operand
* @param[in] index The operand index
* @param[in] size The size of the memory
*/
- virtual void claim(const model::OperandIndex &, size_t) = 0;
+ virtual void claim(const ir::OperandIndex &, size_t) = 0;
/**
* @brief Release memory for operand
* @param[in] index The operand index
*/
- virtual void release(const model::OperandIndex &) = 0;
+ virtual void release(const ir::OperandIndex &) = 0;
/**
* @brief Get capacity for memory planning
* @return The value of capacity
* @param[in] index The operand index
* @param[in] size The size of the memory
*/
- void claim(const model::OperandIndex &, size_t) override;
+ void claim(const ir::OperandIndex &, size_t) override;
/**
* @brief Release memory for operand by bump way
* @param[in] index The operand index
*/
- void release(const model::OperandIndex &) override;
+ void release(const ir::OperandIndex &) override;
/**
* @brief Get capacity for memory planning
* @return The value of capacity
* @param[in] index The operand index
* @param[in] size The size of the memory
*/
- void claim(const model::OperandIndex &, size_t) override;
+ void claim(const ir::OperandIndex &, size_t) override;
/**
* @brief Release memory for operand by firstfit way
* @param[in] index The operand index
*/
- void release(const model::OperandIndex &) override;
+ void release(const ir::OperandIndex &) override;
/**
* @brief Get capacity for memory planning
* @return The value of capacity
uint32_t _capacity = 0;
MemoryPlans _mem_plans;
// Use std::map because claim() assumes that _claim_table is sorted by uint32_t(base_offset)
- std::map<uint32_t, model::OperandIndex> _claim_table;
+ std::map<uint32_t, ir::OperandIndex> _claim_table;
};
} // namespace srcn
namespace srcn
{
-ShapeFixer::ShapeFixer(const neurun::model::Operands &operand_ctx,
+ShapeFixer::ShapeFixer(const ir::Operands &operand_ctx,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _ctx(operand_ctx), _tensor_builder(tensor_builder)
{
#include <backend/IShapeFixer.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "operand/Tensor.h"
#include "TensorBuilder.h"
class ShapeFixer : public IShapeFixer
{
public:
- ShapeFixer(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
+ ShapeFixer(const ir::Operands &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder);
std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
void visit(const model::operation::Add &) override;
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
};
// DO NOTHING
}
-void TensorBuilder::registerTensorInfo(const model::OperandIndex &ind,
- const model::OperandInfo &tensor_info,
+void TensorBuilder::registerTensorInfo(const ir::OperandIndex &ind,
+ const ir::OperandInfo &tensor_info,
ir::Layout backend_layout, bool as_const)
{
_tensor_info_map.emplace(ind, tensor_info);
_constants.append(ind);
}
-void TensorBuilder::registerSubTensorInfo(const model::OperandIndex &,
- const compiler::SubTensorInfo &)
+void TensorBuilder::registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &)
{
// Not supported yet
assert(false);
}
-void TensorBuilder::notifyFirstUse(const model::OperandIndex &ind)
+void TensorBuilder::notifyFirstUse(const ir::OperandIndex &ind)
{
assert(_tensor_info_map.find(ind) != _tensor_info_map.end());
const auto &tensor_info = _tensor_info_map.at(ind);
_tensor_mgr->claimPlan(ind, size);
}
-void TensorBuilder::notifyLastUse(const model::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
+void TensorBuilder::notifyLastUse(const ir::OperandIndex &ind) { _tensor_mgr->releasePlan(ind); }
-bool TensorBuilder::isRegistered(const model::OperandIndex &ind) const
+bool TensorBuilder::isRegistered(const ir::OperandIndex &ind) const
{
return _tensor_info_map.find(ind) != _tensor_info_map.end();
}
}
std::shared_ptr<::neurun::backend::operand::ITensor>
-TensorBuilder::tensorAt(const model::OperandIndex &ind)
+TensorBuilder::tensorAt(const ir::OperandIndex &ind)
{
return _tensor_mgr->at(ind);
}
void TensorBuilder::iterate(const IterateFunction &fn) { _tensor_mgr->iterate(fn); }
-std::shared_ptr<operand::Tensor> TensorBuilder::at(const ::neurun::model::OperandIndex &ind)
+std::shared_ptr<operand::Tensor> TensorBuilder::at(const ir::OperandIndex &ind)
{
return _tensor_mgr->at(ind);
}
#include <backend/ITensorBuilder.h>
#include "operand/Tensor.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
#include "TensorManager.h"
namespace neurun
* @param[in] info Operand information
* @param[in] layout Operand data layout
*/
- void registerTensorInfo(const model::OperandIndex &ind, const model::OperandInfo &info,
+ void registerTensorInfo(const ir::OperandIndex &ind, const ir::OperandInfo &info,
ir::Layout backend_layout, bool as_const) override;
/**
* @brief Register subtensor information to allocate on CPU backend
* @param[in] ind Operand index
* @param[in] info Tensor information
*/
- void registerSubTensorInfo(const model::OperandIndex &ind,
+ void registerSubTensorInfo(const ir::OperandIndex &ind,
const compiler::SubTensorInfo &info) override;
- void notifyFirstUse(const model::OperandIndex &) override;
- void notifyLastUse(const model::OperandIndex &) override;
+ void notifyFirstUse(const ir::OperandIndex &) override;
+ void notifyLastUse(const ir::OperandIndex &) override;
- bool isRegistered(const model::OperandIndex &) const override;
+ bool isRegistered(const ir::OperandIndex &) const override;
void prepare(void) override;
void allocateConsts() override;
void finalize() override { /* DO NOTHING */}
std::shared_ptr<::neurun::backend::operand::ITensor>
- tensorAt(const model::OperandIndex &ind) override;
+ tensorAt(const ir::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
std::unique_ptr<ITensorManager> releaseTensorManager(void) override;
- std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+ std::shared_ptr<operand::Tensor> at(const ir::OperandIndex &ind);
private:
std::unique_ptr<TensorManager> _tensor_mgr;
- model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
- model::OperandIndexMap<ir::Layout> _tensor_layout_map;
- model::OperandIndexSequence _constants;
+ ir::OperandIndexMap<ir::OperandInfo> _tensor_info_map;
+ ir::OperandIndexMap<ir::Layout> _tensor_layout_map;
+ ir::OperandIndexSequence _constants;
};
} // namespace srcn
void TensorManager::deallocateNonconsts(void) { _nonconst_mgr->deallocate(); }
-void TensorManager::buildTensor(const model::OperandIndex &ind,
- const model::OperandInfo &tensor_info, ir::Layout layout,
- bool as_const)
+void TensorManager::buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info,
+ ir::Layout layout, bool as_const)
{
assert(_ind_to_mgr.find(ind) == _ind_to_mgr.end());
if (as_const)
}
}
-void TensorManager::claimPlan(const model::OperandIndex &ind, uint32_t size)
+void TensorManager::claimPlan(const ir::OperandIndex &ind, uint32_t size)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
_ind_to_mgr.at(ind).claimPlan(ind, size);
}
-void TensorManager::releasePlan(const model::OperandIndex &ind)
+void TensorManager::releasePlan(const ir::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
_ind_to_mgr.at(ind).releasePlan(ind);
}
-std::shared_ptr<operand::Tensor> TensorManager::at(const ::neurun::model::OperandIndex &ind)
+std::shared_ptr<operand::Tensor> TensorManager::at(const ir::OperandIndex &ind)
{
assert(_ind_to_mgr.find(ind) != _ind_to_mgr.end());
return _ind_to_mgr.at(ind).tensors().at(ind);
}
-model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
+ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::constTensors(void)
{
return _const_mgr->tensors();
}
-model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
+ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &TensorManager::nonconstTensors(void)
{
return _nonconst_mgr->tensors();
}
-void TensorManager::iterate(const std::function<void(const model::OperandIndex &)> &fn)
+void TensorManager::iterate(const std::function<void(const ir::OperandIndex &)> &fn)
{
for (auto it : _nonconst_mgr->tensors())
fn(it.first);
#include "backend/ITensorManager.h"
#include "MemoryManager.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
namespace neurun
{
void deallocateConsts(void) override;
void deallocateNonconsts(void) override;
- void buildTensor(const model::OperandIndex &ind, const model::OperandInfo &tensor_info,
+ void buildTensor(const ir::OperandIndex &ind, const ir::OperandInfo &tensor_info,
ir::Layout layout, bool as_const);
- void claimPlan(const model::OperandIndex &ind, uint32_t size);
- void releasePlan(const model::OperandIndex &ind);
+ void claimPlan(const ir::OperandIndex &ind, uint32_t size);
+ void releasePlan(const ir::OperandIndex &ind);
- std::shared_ptr<operand::Tensor> at(const ::neurun::model::OperandIndex &ind);
+ std::shared_ptr<operand::Tensor> at(const ir::OperandIndex &ind);
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
- model::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &constTensors(void);
+ ir::OperandIndexMap<std::shared_ptr<operand::Tensor>> &nonconstTensors(void);
- void iterate(const std::function<void(const model::OperandIndex &)> &fn);
+ void iterate(const std::function<void(const ir::OperandIndex &)> &fn);
private:
std::unique_ptr<MemoryManager> _const_mgr;
std::unique_ptr<MemoryManager> _nonconst_mgr;
- model::OperandIndexMap<MemoryManager &> _ind_to_mgr;
+ ir::OperandIndexMap<MemoryManager &> _ind_to_mgr;
};
} // namespace srcn
namespace srcn
{
-TensorRegister::TensorRegister(const model::Operands &operands,
+TensorRegister::TensorRegister(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder)
: _operands{operands}, _tensor_builder{tensor_builder}
{
const auto backend_filter_layout =
backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW;
- model::OperandInfo backend_info{
+ ir::OperandInfo backend_info{
asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout),
kernel_obj.info().typeInfo()};
_tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout,
const auto backend_filter_layout =
backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWIO : kernel::FilterLayout::OIHW;
- model::OperandInfo backend_info{
+ ir::OperandInfo backend_info{
asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout),
kernel_obj.info().typeInfo()};
_tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout,
const auto backend_filter_layout =
backend_layout == ir::Layout::NHWC ? kernel::FilterLayout::HWOI : kernel::FilterLayout::IOHW;
- model::OperandInfo backend_info{
+ ir::OperandInfo backend_info{
asKernelShape(kernel_obj.shape(), frontend_filter_layout, backend_filter_layout),
kernel_obj.info().typeInfo()};
_tensor_builder->registerTensorInfo(kernel_index, backend_info, backend_layout,
class TensorRegister : public ITensorRegister
{
public:
- TensorRegister(const model::Operands &operands,
+ TensorRegister(const ir::Operands &operands,
const std::shared_ptr<TensorBuilder> &tensor_builder);
public:
void visit(const model::operation::TransposeConv &) override;
private:
- const model::Operands &operands() const override { return _operands; }
+ const ir::Operands &operands() const override { return _operands; }
std::shared_ptr<ITensorBuilder> tensor_builder() const override { return _tensor_builder; }
bool supportSubTensor() const final { return false; }
private:
- const model::Operands &_operands;
+ const ir::Operands &_operands;
const std::shared_ptr<TensorBuilder> _tensor_builder;
};
}
}
-TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout,
+TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout,
ir::Layout backend_layout)
{
TensorDescriptor descriptor;
#include <limits>
#include <vector>
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "ir/DataType.h"
#include <ir/InternalType.h>
#include <ncnn/srcn/conv_type.h>
nnfw::srcn::convType_t convertLayout(ir::Layout layout);
-TensorDescriptor getTensorDescriptor(const ::neurun::model::Operand &o, ir::Layout frontend_layout,
+TensorDescriptor getTensorDescriptor(const ir::Operand &o, ir::Layout frontend_layout,
ir::Layout backend_layout);
} // namespace kernel
#include <backend/operand/ITensor.h>
#include <ir/Layout.h>
-#include "model/OperandInfo.h"
+#include "ir/OperandInfo.h"
namespace neurun
{
Tensor() = delete;
public:
- Tensor(const model::OperandInfo &info, ir::Layout layout) : _info(info), _layout(layout)
+ Tensor(const ir::OperandInfo &info, ir::Layout layout) : _info(info), _layout(layout)
{
// DO NOTHING
}
void access(const std::function<void(ITensor &tensor)> &fn) final;
private:
- model::OperandInfo _info;
+ ir::OperandInfo _info;
uint8_t *_buffer = nullptr;
ir::Layout _layout;
};
#include <memory>
-#include "model/Operands.h"
+#include "ir/Operands.h"
namespace neurun
{
virtual std::shared_ptr<neurun::backend::IConfig> config() const = 0;
virtual std::unique_ptr<BackendContext>
- newContext(const model::Operands &operands,
+ newContext(const ir::Operands &operands,
const std::shared_ptr<backend::custom::IKernelBuilder> &kb) const = 0;
};
#include "ITensorBuilder.h"
#include "ir/Layout.h"
-#include "model/Operand.h"
-#include "model/Operands.h"
+#include "ir/Operand.h"
+#include "ir/Operands.h"
#include "model/OperationVisitor.h"
#include "model/Subgraph.h"
#include "util/logging.h"
namespace
{
template <typename T>
-static void Init(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj,
+static void Init(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj,
const bool copy,
const neurun::ir::Layout frontend_layout = neurun::ir::Layout::UNKNOWN)
{
}
template <typename T>
-void copyInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj)
+void copyInit(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj)
{
Init<T>(model_obj, obj, true);
}
template <typename T>
-void permuteInit(const neurun::model::Operand &model_obj, neurun::backend::operand::ITensor &obj,
+void permuteInit(const neurun::ir::Operand &model_obj, neurun::backend::operand::ITensor &obj,
const neurun::ir::Layout frontend_layout)
{
const bool copy = frontend_layout == obj.layout();
}
public:
- using Initializer = std::function<void(const model::Operand &, backend::operand::ITensor &)>;
+ using Initializer = std::function<void(const ir::Operand &, backend::operand::ITensor &)>;
- void generate(const model::Subgraph &subg, const model::Operands &operands)
+ void generate(const model::Subgraph &subg, const ir::Operands &operands)
{
_current_subg_layout = subg.getLayout();
subg.accept(*this);
protected:
#define OP(InternalName) \
virtual void visit(const model::operation::InternalName &) override { /* DO NOTHING */}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
protected:
- virtual const model::Operands &operands() const = 0;
+ virtual const ir::Operands &operands() const = 0;
virtual std::shared_ptr<ITensorBuilder> tensor_builder() const = 0;
protected:
- void registerCopyInitializer(const model::OperandIndex &index, const model::Operand &obj)
+ void registerCopyInitializer(const ir::OperandIndex &index, const ir::Operand &obj)
{
// For only CONSTANTS
// TODO Add to check if tensor has been allocated
}
protected:
- void registerPermuteInitializer(const model::OperandIndex &index, const model::Operand &obj)
+ void registerPermuteInitializer(const ir::OperandIndex &index, const ir::Operand &obj)
{
// For only CONSTANTS
// TODO Add to check if tensor has been allocated
}
private:
- bool exist(const model::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
+ bool exist(const ir::OperandIndex &ind) { return _init_map.find(ind) != _init_map.end(); }
protected:
- std::unordered_map<model::OperandIndex, Initializer> _init_map;
+ std::unordered_map<ir::OperandIndex, Initializer> _init_map;
ir::Layout _current_subg_layout;
};
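
Every ConstantInitializer hunk in this patch follows the pattern made explicit here: a visit() stores one Initializer per constant operand into _init_map, keyed by the renamed ir::OperandIndex, and the entries are replayed against allocated tensors later (the replay code sits outside this diff). Schematically, from inside a visit():

    // Fragment; assumes `index` is the ir::OperandIndex of a constant operand.
    _init_map[index] = [](const neurun::ir::Operand &model_obj,
                          neurun::backend::operand::ITensor &obj) {
      // read from model_obj.data().base() and write into obj, as the
      // block-size/paddings initializers earlier in this patch do
    };
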
{ \
throw std::runtime_error("KernelGenerator: NYI for operation '" #InternalName "'"); \
}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
protected:
{ \
throw std::runtime_error("ShapeFixer: NYI for operation '" #InternalName "'"); \
}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
public:
#include <map>
-#include "model/Index.h"
-#include "model/OperandInfo.h"
+#include "ir/Index.h"
+#include "ir/OperandInfo.h"
#include "model/Operation.h"
#include "ir/Layout.h"
#include "operand/ITensor.h"
struct ITensorBuilder
{
- using IterateFunction = std::function<void(const model::OperandIndex &)>;
+ using IterateFunction = std::function<void(const ir::OperandIndex &)>;
virtual ~ITensorBuilder(void) = default;
/**
* @brief Register tensor information to allocate on backend
*/
- virtual void registerTensorInfo(const model::OperandIndex &, const model::OperandInfo &,
+ virtual void registerTensorInfo(const ir::OperandIndex &, const ir::OperandInfo &,
ir::Layout backend_layout, bool as_const) = 0;
/**
* @brief Register subtensor information to allocate on backend
*/
- virtual void registerSubTensorInfo(const model::OperandIndex &,
- const compiler::SubTensorInfo &) = 0;
+ virtual void registerSubTensorInfo(const ir::OperandIndex &, const compiler::SubTensorInfo &) = 0;
- virtual void notifyFirstUse(const model::OperandIndex &) = 0;
- virtual void notifyLastUse(const model::OperandIndex &) = 0;
+ virtual void notifyFirstUse(const ir::OperandIndex &) = 0;
+ virtual void notifyLastUse(const ir::OperandIndex &) = 0;
- virtual bool isRegistered(const model::OperandIndex &) const = 0;
+ virtual bool isRegistered(const ir::OperandIndex &) const = 0;
virtual void prepare(void) = 0;
virtual void allocateConsts() = 0;
virtual void finalize() = 0;
virtual std::shared_ptr<::neurun::backend::operand::ITensor>
- tensorAt(const model::OperandIndex &ind) = 0;
+ tensorAt(const ir::OperandIndex &ind) = 0;
virtual void iterate(const IterateFunction &fn) = 0;
virtual void preVisit(const model::Operation &) = 0;
#include "ir/operand/ParentInfo.h"
#include "ITensorBuilder.h"
#include "ir/Layout.h"
-#include "model/OperandIndexSequence.h"
-#include "model/OperandInfo.h"
-#include "model/Operands.h"
+#include "ir/OperandIndexSequence.h"
+#include "ir/OperandInfo.h"
+#include "ir/Operands.h"
#include "model/OperationVisitor.h"
namespace
}
protected:
- virtual const model::Operands &operands() const = 0;
+ virtual const ir::Operands &operands() const = 0;
virtual std::shared_ptr<ITensorBuilder> tensor_builder() const = 0;
virtual bool supportSubTensor() const = 0;
#define OP(InternalName) \
virtual void visit(const model::operation::InternalName &node) override \
{ \
- model::OperandIndexSequence indices{node.getInputs()}; \
+ ir::OperandIndexSequence indices{node.getInputs()}; \
indices.append(node.getOutputs()); \
for (const auto &index : indices) \
{ \
defaultRegisterTensorInfo(index); \
} \
}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
protected:
- void defaultRegisterTensorInfo(const model::OperandIndex &index) const
+ void defaultRegisterTensorInfo(const ir::OperandIndex &index) const
{
if (tensor_builder()->isRegistered(index))
{
}
else
{
- model::OperandInfo backend_info{
- permuteTensorShape(obj.shape(), frontend_layout, backend_layout), obj.typeInfo()};
+ ir::OperandInfo backend_info{permuteTensorShape(obj.shape(), frontend_layout, backend_layout),
+ obj.typeInfo()};
tensor_builder()->registerTensorInfo(index, backend_info, backend_layout, obj.isConstant());
}
}
protected:
virtual ir::Layout frontendLayout() const final { return _current_subg_layout; }
- virtual ir::Layout backendLayout(const model::OperandIndex &index) const final
+ virtual ir::Layout backendLayout(const ir::OperandIndex &index) const final
{
assert(_lower_info_map != nullptr);
const auto lower_info = _lower_info_map->operand.at(index).get();
}
private:
- compiler::SubTensorInfo generateSubTensorInfo(const model::Operand &obj,
- ir::Layout frontend_layout,
+ compiler::SubTensorInfo generateSubTensorInfo(const ir::Operand &obj, ir::Layout frontend_layout,
ir::Layout backend_layout) const
{
assert(obj.shape().rank() <= 4);
shape.extendRank(4);
offset = {offset[0], offset[2], offset[3], offset[1]};
}
- model::Operand subtensor_obj{permuteTensorShape(shape, frontend_layout, backend_layout),
- obj.typeInfo()};
+ ir::Operand subtensor_obj{permuteTensorShape(shape, frontend_layout, backend_layout),
+ obj.typeInfo()};
subtensor_obj.parent_info(
nnfw::cpp14::make_unique<graph::operand::ParentInfo>(parent_index, offset));
return compiler::SubTensorInfo{subtensor_obj};
#ifndef __NEURUN_COMPILER_SUBTENSOR_INFO_H__
#define __NEURUN_COMPILER_SUBTENSOR_INFO_H__
-#include "model/Operand.h"
+#include "ir/Operand.h"
namespace neurun
{
* @brief Construct a new SubTensorInfo object
* @param[in] obj SubTensor object
*/
- SubTensorInfo(const model::Operand &obj)
+ SubTensorInfo(const ir::Operand &obj)
: _parent{obj.parent_info()->parent()}, _shape{obj.shape()}, _type{obj.typeInfo()},
_offset{obj.parent_info()->offset()}
{
* @brief Return parent tensor index
* @return Parent tensor index
*/
- const model::OperandIndex parent(void) const { return _parent; }
+ const ir::OperandIndex parent(void) const { return _parent; }
/**
* @brief Return tensor shape
* @return Tensor shape
const neurun::util::Coordinates offset(void) const { return _offset; }
private:
- const model::OperandIndex _parent;
+ const ir::OperandIndex _parent;
const ir::Shape _shape;
const ir::TypeInfo _type;
const neurun::util::Coordinates _offset;
* @param[in] length Input data's length
* @param[in] layout Input data's data format
*/
- void setInput(const model::IOIndex &index, const void *buffer, size_t length,
+ void setInput(const ir::IOIndex &index, const void *buffer, size_t length,
ir::Layout layout = ir::Layout::NHWC);
/**
* @brief Set input data's information, especially to specify unknown dimensions on model
* @param[in] length Input data's length
* @param[in] layout Input data's data format
*/
- void setInput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
+ void setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
const void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC);
/**
* @brief Set output data's information
* @param[in] length Output data's length
* @param[in] layout Output data's data format
*/
- void setOutput(const model::IOIndex &index, void *buffer, size_t length,
+ void setOutput(const ir::IOIndex &index, void *buffer, size_t length,
ir::Layout layout = ir::Layout::NHWC);
/**
* @brief Set output data's information, especially to specify unknown dimensions on model
* @param[in] length Output data's length
* @param[in] layout Output data's data format
*/
- void setOutput(const model::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
+ void setOutput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
void *buffer, size_t length, ir::Layout layout = ir::Layout::NHWC);
/**
* @brief Set input data's data format
* @param[in] index Input index
* @param[in] layout Input data's data format
*/
- void setInputLayout(const model::IOIndex &index, ir::Layout layout);
+ void setInputLayout(const ir::IOIndex &index, ir::Layout layout);
/**
* @brief Set output data's data format
* @param[in] index Output index
* @param[in] layout Output data's data format
*/
- void setOutputLayout(const model::IOIndex &index, ir::Layout layout);
+ void setOutputLayout(const ir::IOIndex &index, ir::Layout layout);
/**
* @brief Execution
* @note It should be called after setting input and output buffer
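
A minimal caller-side sketch of the Execution methods declared above (buffer contents, sizes, and the index value are hypothetical; `execution` is assumed to point at this class):

    float in[16] = {};
    float out[16] = {};
    execution->setInput(neurun::ir::IOIndex{0}, in, sizeof(in));
    execution->setInputLayout(neurun::ir::IOIndex{0}, neurun::ir::Layout::NHWC);
    execution->setOutput(neurun::ir::IOIndex{0}, out, sizeof(out));
    // then run; the execute entry point itself is elided from this hunk
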
#include "ir/Graph.h"
#include "IFunction.h"
#include "IODescription.h"
-#include "model/OperationIndexMap.h"
+#include "ir/OperationIndexMap.h"
namespace neurun
{
* @brief Set an ordering on operations
* @param[in] ranks The table encoding the ordering
*/
- virtual void setIndexedRanks(std::shared_ptr<model::OperationIndexMap<int64_t>>) = 0;
+ virtual void setIndexedRanks(std::shared_ptr<ir::OperationIndexMap<int64_t>>) = 0;
/**
* @brief Start execution
#include <vector>
-#include "model/OperandInfo.h"
+#include "ir/OperandInfo.h"
namespace neurun
{
struct InputDesc
{
- const model::OperandInfo info;
+ const ir::OperandInfo info;
const void *buffer;
const size_t size;
const ir::Layout layout;
InputDesc(void) = delete;
- InputDesc(const model::OperandInfo &info, const void *buffer, const size_t size,
- ir::Layout layout)
+ InputDesc(const ir::OperandInfo &info, const void *buffer, const size_t size, ir::Layout layout)
: info(info), buffer(buffer), size(size), layout(layout)
{
}
struct OutputDesc
{
- const model::OperandInfo info;
+ const ir::OperandInfo info;
void *buffer;
const size_t size;
const ir::Layout layout;
OutputDesc(void) = delete;
- OutputDesc(const model::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout)
+ OutputDesc(const ir::OperandInfo &info, void *buffer, const size_t size, ir::Layout layout)
: info(info), buffer(buffer), size(size), layout(layout)
{
}
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_DATA_H__
-#define __NEURUN_MODEL_DATA_H__
+#ifndef __NEURUN_IR_DATA_H__
+#define __NEURUN_IR_DATA_H__
#include <algorithm>
namespace neurun
{
-namespace model
+namespace ir
{
struct Data
const size_t _size;
};
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using Data = ir::Data;
+using CachedData = ir::CachedData;
} // namespace model
} // namespace neurun
-#endif // __NEURUN_MODEL_DATA_H__
+#endif // __NEURUN_IR_DATA_H__
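
This Data.h hunk is the template for the whole patch: the type moves into neurun::ir, and a TODO-marked alias keeps the old neurun::model spelling compiling so call sites can migrate file by file instead of in one atomic change. From the consumer side:

    // Both spellings name the same type while the alias block remains.
    neurun::model::Data *old_style = nullptr; // resolves via `using Data = ir::Data;`
    neurun::ir::Data *new_style = old_style;  // same type, no cast needed

Once the remaining model:: spellings are gone, deleting the alias blocks is the only cleanup left.
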
#include <functional>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "model/Operations.h"
#include "ir/LowerInfoMap.h"
#include "model/Subgraph.h"
{
public:
using GraphRef = typename std::conditional<is_const, const Graph &, Graph &>::type;
- using IndexRef = const model::OperationIndex &;
+ using IndexRef = const ir::OperationIndex &;
using NodeRef =
typename std::conditional<is_const, const model::Operation &, model::Operation &>::type;
using IterFn = std::function<void(IndexRef, NodeRef)>;
// Graph Building
public:
- model::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type);
- model::OperationIndex addOperation(std::unique_ptr<model::Operation> &&node);
- void setOperandValue(const model::OperandIndex &ind, std::unique_ptr<model::Data> &&data);
- void addInput(const model::OperandIndex &ind);
- void addOutput(const model::OperandIndex &ind);
+ ir::OperandIndex addOperand(const ir::Shape &shape, const ir::TypeInfo &type);
+ ir::OperationIndex addOperation(std::unique_ptr<model::Operation> &&node);
+ void setOperandValue(const ir::OperandIndex &ind, std::unique_ptr<ir::Data> &&data);
+ void addInput(const ir::OperandIndex &ind);
+ void addOutput(const ir::OperandIndex &ind);
void finishBuilding(void);
void lower(void);
- void removeOperand(const model::OperandIndex &ind) { _operands.remove(ind); }
+ void removeOperand(const ir::OperandIndex &ind) { _operands.remove(ind); }
bool isBuildingPhase(void) const { return _phase == Phase::BUILDING; }
private:
// Accessors
public:
- const model::OperandIndexSequence &getInputs() const { return _inputs; }
- model::OperandIndexSequence &getInputs() { return _inputs; }
- const model::OperandIndexSequence &getOutputs() const { return _outputs; }
- model::OperandIndexSequence &getOutputs() { return _outputs; }
- const model::Operands &operands() const { return _operands; }
- model::Operands &operands() { return _operands; } // TODO Remove this non-const accessor
+ const ir::OperandIndexSequence &getInputs() const { return _inputs; }
+ ir::OperandIndexSequence &getInputs() { return _inputs; }
+ const ir::OperandIndexSequence &getOutputs() const { return _outputs; }
+ ir::OperandIndexSequence &getOutputs() { return _outputs; }
+ const ir::Operands &operands() const { return _operands; }
+ ir::Operands &operands() { return _operands; } // TODO Remove this non-const accessor
const model::Operations &operations() const { return _operations; }
model::Operations &operations() { return _operations; }
const compiler::BackendResolver *backend_resolver() const { return _backend_resolver.get(); }
private:
Phase _phase{Phase::BUILDING};
model::Operations _operations;
- model::Operands _operands;
- model::OperandIndexSequence _inputs;
- model::OperandIndexSequence _outputs;
+ ir::Operands _operands;
+ ir::OperandIndexSequence _inputs;
+ ir::OperandIndexSequence _outputs;
// For LOWERED phase
public:
const LowerInfoMap *getLowerInfo() const { return _lower_info_map.get(); }
- const operation::LowerInfo *getLowerInfo(const model::SubgraphIndex &subg_index) const;
- void setLowerInfo(const model::SubgraphIndex &subg_index,
+ const operation::LowerInfo *getLowerInfo(const ir::SubgraphIndex &subg_index) const;
+ void setLowerInfo(const ir::SubgraphIndex &subg_index,
std::unique_ptr<operation::LowerInfo> &&lower_info);
- void removeLowerInfo(const model::SubgraphIndex &subg_index);
- const operand::LowerInfo *getLowerInfo(const model::OperandIndex &index) const;
- operand::LowerInfo *getLowerInfo(const model::OperandIndex &index);
- void setLowerInfo(const model::OperandIndex &index,
+ void removeLowerInfo(const ir::SubgraphIndex &subg_index);
+ const operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index) const;
+ operand::LowerInfo *getLowerInfo(const ir::OperandIndex &index);
+ void setLowerInfo(const ir::OperandIndex &index,
std::unique_ptr<operand::LowerInfo> &&lower_info);
- void removeLowerInfo(const model::OperandIndex &index);
+ void removeLowerInfo(const ir::OperandIndex &index);
model::Subgraphs &subgraphs()
{
assert(_subgraphs);
void setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br);
private:
- void
- makeSubgraphs(model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info);
+ void makeSubgraphs(ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info);
void manipulateLowerInfo(
- model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info);
+ ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info);
void dumpLowerInfo();
- bool mergeable(const model::SubgraphIndex &subg_index, const model::OperationIndex &node_index,
+ bool mergeable(const ir::SubgraphIndex &subg_index, const ir::OperationIndex &node_index,
ir::Layout layout);
- model::SubgraphIndex appendFreshSingleOpSubgraph(const model::OperationIndex &node_index,
- const model::Operation &node, ir::Layout layout);
+ ir::SubgraphIndex appendFreshSingleOpSubgraph(const ir::OperationIndex &node_index,
+ const model::Operation &node, ir::Layout layout);
private:
std::unique_ptr<compiler::BackendResolver> _backend_resolver;
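
For orientation, the building-phase accessors above combine roughly as follows (a sketch, assuming `graph` is a Graph instance, `shape`/`type` are a valid ir::Shape and ir::TypeInfo, and the operation node is omitted):

    auto in = graph.addOperand(shape, type); // now returns ir::OperandIndex
    auto out = graph.addOperand(shape, type);
    graph.addInput(in);
    graph.addOutput(out);
    // graph.addOperation(std::move(node));  // node: std::unique_ptr<model::Operation>
    graph.finishBuilding();                  // leaves Phase::BUILDING
    graph.lower();                           // afterwards subgraphs()/getLowerInfo() apply
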
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_OPERAND_INDEX_H__
-#define __NEURUN_MODEL_OPERAND_INDEX_H__
+#ifndef __NEURUN_IR_OPERAND_INDEX_H__
+#define __NEURUN_IR_OPERAND_INDEX_H__
#include "util/Index.h"
namespace neurun
{
-namespace model
+namespace ir
{
struct OperationIndexTag;
struct SubgraphIndexTag;
using SubgraphIndex = ::neurun::util::Index<uint32_t, SubgraphIndexTag>;
-} // namespace model
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OperationIndex = ir::OperationIndex;
+using OperandIndex = ir::OperandIndex;
+using IOIndex = ir::IOIndex;
+using SubgraphIndex = ir::SubgraphIndex;
+} // namespace model
+
} // namespace neurun
-#endif // __NEURUN_MODEL_OPERAND_INDEX_H__
+#endif // __NEURUN_IR_OPERAND_INDEX_H__
#include "ir/operand/LowerInfo.h"
#include "ir/operation/LowerInfo.h"
-#include "model/OperandIndexMap.h"
-#include "model/Index.h"
+#include "ir/OperandIndexMap.h"
+#include "ir/Index.h"
namespace neurun
{
struct LowerInfoMap
{
- std::unordered_map<model::SubgraphIndex, std::unique_ptr<operation::LowerInfo>> operation;
- model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
+ std::unordered_map<ir::SubgraphIndex, std::unique_ptr<operation::LowerInfo>> operation;
+ ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operand;
};
} // namespace graph
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_OP_CODE_H__
-#define __NEURUN_MODEL_OP_CODE_H__
+#ifndef __NEURUN_IR_OP_CODE_H__
+#define __NEURUN_IR_OP_CODE_H__
#include <functional>
#include <stdint.h>
namespace neurun
{
-namespace model
+namespace ir
{
enum class OpCode
{
Invalid, //< Unused
#define OP(Name) Name, //< All operations
-#include "Operations.lst"
+#include "ir/Operations.lst"
#undef OP
Subgraph, //< Subgraph is treated specially
COUNT
const char *toString(OpCode opcode);
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OpCode = ir::OpCode;
} // namespace model
} // namespace neurun
namespace std
{
-template <> struct hash<::neurun::model::OpCode>
+template <> struct hash<neurun::ir::OpCode>
{
- size_t operator()(::neurun::model::OpCode value) const noexcept
+ size_t operator()(neurun::ir::OpCode value) const noexcept
{
- using type = typename std::underlying_type<::neurun::model::OpCode>::type;
+ using type = typename std::underlying_type<neurun::ir::OpCode>::type;
return hash<type>()(static_cast<type>(value));
}
};
} // namespace std
-#endif // __NEURUN_MODEL_OP_CODE_H__
+#endif // __NEURUN_IR_OP_CODE_H__
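
The hash specialization above is what lets the moved enum key unordered containers, e.g.:

    #include <unordered_map>

    std::unordered_map<neurun::ir::OpCode, int64_t> op_counts;
    ++op_counts[neurun::ir::OpCode::Add]; // hashes via the underlying_type cast above
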
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_OPERAND_H__
-#define __NEURUN_MODEL_OPERAND_H__
+#ifndef __NEURUN_IR_OPERAND_H__
+#define __NEURUN_IR_OPERAND_H__
#include <cassert>
#include <cstdint>
#include <cpp14/memory.h>
#include <algorithm>
-#include "Data.h"
+#include "ir/Data.h"
#include "ir/DataType.h"
-#include "OperandInfo.h"
+#include "ir/OperandInfo.h"
#include "ir/operand/ParentInfo.h" // TODO Remove this dependency
-#include "model/OperationIndexList.h"
+#include "ir/OperationIndexList.h"
namespace neurun
{
-namespace model
+namespace ir
{
class Operand
std::shared_ptr<graph::operand::ParentInfo> _parent_info;
};
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using Operand = ir::Operand;
} // namespace model
} // namespace neurun
-#endif // __NEURUN_MODEL_OPERAND_H__
+#endif // __NEURUN_IR_OPERAND_H__
namespace neurun
{
-namespace model
-{
-namespace operation
+namespace ir
{
class OperandConstraint
uint32_t _end;
};
-} // namespace operation
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OperandConstraint = ir::OperandConstraint;
} // namespace model
} // namespace neurun
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_OPERAND_INDEX_MAP_H__
-#define __NEURUN_MODEL_OPERAND_INDEX_MAP_H__
+#ifndef __NEURUN_IR_OPERAND_INDEX_MAP_H__
+#define __NEURUN_IR_OPERAND_INDEX_MAP_H__
#include <unordered_map>
-#include "Index.h"
+#include "ir/Index.h"
namespace neurun
{
-namespace model
+namespace ir
{
-template <typename T> using OperandIndexMap = std::unordered_map<model::OperandIndex, T>;
+template <typename T> using OperandIndexMap = std::unordered_map<OperandIndex, T>;
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+template <typename T> using OperandIndexMap = ir::OperandIndexMap<T>;
} // namespace model
} // namespace neurun
-#endif // __NEURUN_MODEL_OPERAND_INDEX_MAP_H__
+#endif // __NEURUN_IR_OPERAND_INDEX_MAP_H__
#include <initializer_list>
#include <vector>
-#include "Index.h"
+#include "ir/Index.h"
namespace neurun
{
-namespace model
+namespace ir
{
class OperandIndexSequence
std::vector<OperandIndex> _set;
};
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OperandIndexSequence = ir::OperandIndexSequence;
} // namespace model
} // namespace neurun
* @file OperandInfo.h
* @brief This file contains OperandInfo class
*/
-#ifndef __NEURUN_MODEL_OPERAND_INFO_H__
-#define __NEURUN_MODEL_OPERAND_INFO_H__
+#ifndef __NEURUN_IR_OPERAND_INFO_H__
+#define __NEURUN_IR_OPERAND_INFO_H__
#include "ir/Shape.h"
#include "ir/TypeInfo.h"
namespace neurun
{
-namespace model
+namespace ir
{
/**
TypeInfo _typeInfo;
};
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OperandInfo = ir::OperandInfo;
} // namespace model
} // namespace neurun
-#endif // __NEURUN_MODEL_OPERAND_INFO_H__
+#endif // __NEURUN_IR_OPERAND_INFO_H__
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_OPERANDS_H__
-#define __NEURUN_MODEL_OPERANDS_H__
+#ifndef __NEURUN_IR_OPERANDS_H__
+#define __NEURUN_IR_OPERANDS_H__
#include <memory>
#include <unordered_map>
-#include "Operand.h"
-#include "Index.h"
+#include "ir/Operand.h"
+#include "ir/Index.h"
#include "util/ObjectManager.h"
namespace neurun
{
-namespace model
+namespace ir
{
class Operands : public util::ObjectManager<OperandIndex, Operand>
{
};
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using Operands = ir::Operands;
} // namespace model
} // namespace neurun
#include <initializer_list>
#include <list>
-#include "model/Index.h"
+#include "ir/Index.h"
namespace neurun
{
-namespace model
+namespace ir
{
class OperationIndexList
std::list<OperationIndex> _list;
};
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+using OperationIndexList = ir::OperationIndexList;
} // namespace model
} // namespace neurun
* limitations under the License.
*/
-#ifndef __NEURUN_MODEL_OPERATION_INDEX_MAP_H__
-#define __NEURUN_MODEL_OPERATION_INDEX_MAP_H__
+#ifndef __NEURUN_IR_OPERATION_INDEX_MAP_H__
+#define __NEURUN_IR_OPERATION_INDEX_MAP_H__
#include <unordered_map>
-#include "Index.h"
+#include "ir/Index.h"
namespace neurun
{
-namespace model
+namespace ir
{
-template <typename T> using OperationIndexMap = std::unordered_map<model::OperationIndex, T>;
+template <typename T> using OperationIndexMap = std::unordered_map<OperationIndex, T>;
+} // namespace ir
+
+// TODO Remove after merging 'graph' and 'model' namespaces.
+namespace model
+{
+template <typename T> using OperationIndexMap = ir::OperationIndexMap<T>;
} // namespace model
} // namespace neurun
-#endif // __NEURUN_MODEL_OPERATION_INDEX_MAP_H__
+#endif // __NEURUN_IR_OPERATION_INDEX_MAP_H__
#include <stdint.h>
-#include "model/Index.h"
+#include "ir/Index.h"
#include "util/Coordinates.h"
namespace neurun
* @param[in] coordinate Offset of child operand in parent operand
* @return
*/
- ParentInfo(const model::OperandIndex parent, const Coordinates &coordinate)
+ ParentInfo(const ir::OperandIndex parent, const Coordinates &coordinate)
: _parent{parent}, _coordinate{coordinate}
{
// DO NOTHING
* @brief Return parent index
* @return Parent index
*/
- model::OperandIndex parent(void) const { return _parent; }
+ ir::OperandIndex parent(void) const { return _parent; }
/**
* @brief Retern offset in parent
* @return Offset
Coordinates offset(void) const { return _coordinate; }
private:
- model::OperandIndex _parent;
+ ir::OperandIndex _parent;
Coordinates _coordinate;
};
#include <memory>
-#include "model/OpCode.h"
-#include "model/Operand.h"
-#include "model/OperandIndexSequence.h"
-#include "model/OperandConstraint.h"
+#include "ir/OpCode.h"
+#include "ir/Operand.h"
+#include "ir/OperandIndexSequence.h"
+#include "ir/OperandConstraint.h"
namespace neurun
{
namespace model
{
-using OperandConstraint = ::neurun::model::operation::OperandConstraint;
-
class Operation
{
public:
#define OP(InternalName) \
virtual void visit(const operation::InternalName &) {}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
// This Subgraph node should be handled specially so that
#ifndef __NEURUN_MODEL_OPERATIONS_H__
#define __NEURUN_MODEL_OPERATIONS_H__
-#include "model/Index.h"
+#include "ir/Index.h"
#include "model/Operation.h"
#include "util/ObjectManager.h"
#include <memory>
#include "ir/Layout.h"
-#include "Index.h"
+#include "ir/Index.h"
#include "Operation.h"
namespace neurun
std::vector<Element>::const_iterator end() const { return _operations.end(); }
private:
- bool exist(const neurun::model::OperationIndex &index) const;
+ bool exist(const OperationIndex &index) const;
private:
std::vector<Element> _operations;
#ifndef __NEURUN_MODEL_SUBGRAPHS_H__
#define __NEURUN_MODEL_SUBGRAPHS_H__
-#include "model/Index.h"
+#include "ir/Index.h"
#include "model/Subgraph.h"
#include "util/ObjectManager.h"
#define OP(InternalName) \
CONFIG(OP_BACKEND_ ## InternalName, std::string, "")
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
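
Every `#include "ir/Operations.lst"` repointed by this patch relies on the same X-macro expansion. Reduced to its essentials (list entries abbreviated):

    // ir/Operations.lst is a flat list of OP(...) entries, e.g.
    //   OP(Add)
    //   OP(Conv2D)
    //   ...
    // and each includer defines OP to stamp out one item per operation:
    #define OP(InternalName) \
      virtual void visit(const model::operation::InternalName &) { /* DO NOTHING */}
    #include "ir/Operations.lst"
    #undef OP
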
#include "model/operation/MaxPool2D.h"
#include "model/operation/Conv2D.h"
#include "model/operation/DepthwiseConv2D.h"
-#include "model/Operands.h"
-#include "model/Index.h"
+#include "ir/Operands.h"
+#include "ir/Index.h"
#include "ir/Layout.h"
namespace neurun
#include "ir/InternalType.h"
#include "ir/Layout.h"
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "util/Coordinates.h"
#define UNUSED_RELEASE(a) (void)(a)
#include <memory>
#include <map>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "backend/Backend.h"
namespace neurun
#include "backend/Backend.h"
#include "backend/BackendManager.h"
#include "backend/ITensorBuilder.h"
-#include "model/OperationIndexMap.h"
+#include "ir/OperationIndexMap.h"
namespace neurun
{
class BackendResolver
{
public:
- BackendResolver(const model::Operands &operands,
+ BackendResolver(const ir::Operands &operands,
const std::vector<const backend::Backend *> &backends,
const std::shared_ptr<backend::custom::IKernelBuilder> &kb)
{
BackendResolver &operator=(BackendResolver &&obj) = default;
public:
- const backend::BackendContext *getBackendContext(const model::OperationIndex &index) const
+ const backend::BackendContext *getBackendContext(const ir::OperationIndex &index) const
{
return _context_manager.at(_gen_map.at(index)).get();
}
return ret;
}
- const backend::Backend *getBackend(const model::OperationIndex &index) const
+ const backend::Backend *getBackend(const ir::OperationIndex &index) const
{
return getBackendContext(index)->backend;
}
- void setBackend(const model::OperationIndex &index, const backend::Backend *backend)
+ void setBackend(const ir::OperationIndex &index, const backend::Backend *backend)
{
_gen_map[index] = backend;
}
- void iterate(const std::function<void(const model::OperationIndex &,
+ void iterate(const std::function<void(const ir::OperationIndex &,
const backend::BackendContext &)> &fn) const
{
for (const auto &e : _gen_map)
private:
std::unordered_map<const backend::Backend *, std::unique_ptr<backend::BackendContext>>
_context_manager;
- model::OperationIndexMap<const backend::Backend *> _gen_map;
+ ir::OperationIndexMap<const backend::Backend *> _gen_map;
};
} // namespace compiler
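
With the index rename applied, per-operation backend assignment reads as follows (the index value is hypothetical; `resolver` is a BackendResolver and `backend` a const backend::Backend *):

    neurun::ir::OperationIndex op{3};           // hypothetical
    resolver.setBackend(op, backend);           // records into _gen_map
    assert(resolver.getBackend(op) == backend); // round-trips through the context
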
***************************************************/
// Schedule
std::unique_ptr<BackendResolver> br;
- std::shared_ptr<model::OperationIndexMap<int64_t>> indexed_ranks;
+ std::shared_ptr<ir::OperationIndexMap<int64_t>> indexed_ranks;
if (util::getConfigBool(util::config::USE_SCHEDULER))
{
auto scheduler = compiler::HEScheduler(
// Wrap tensors as Object and store them to plan
for (auto &tensor_builder : tensor_builders)
{
- tensor_builder->iterate([&](const model::OperandIndex &index) {
+ tensor_builder->iterate([&](const ir::OperandIndex &index) {
auto object = tensor_builder->tensorAt(index);
operand_context->set(index, object);
});
{
auto operand_context = std::make_shared<OperandContext>();
- graph.subgraphs().iterate([&](const model::SubgraphIndex &, const model::Subgraph &subg) {
+ graph.subgraphs().iterate([&](const ir::SubgraphIndex &, const model::Subgraph &subg) {
auto subtensor_analyzer = SubTensorAnalyzer{graph.operands()};
subg.accept(subtensor_analyzer);
});
// Fix shapes and register tensors
- graph.subgraphs().iterate(
- [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
- auto backend = graph.getLowerInfo(subg_index)->backend();
- auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer;
- shape_fixer->fix(subg);
- const auto tensor_register =
- graph.backend_resolver()->getBackendContext(backend)->tensor_register;
- tensor_register->registerTensors(subg, graph.getLowerInfo());
- });
-
- graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ graph.subgraphs().iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ auto backend = graph.getLowerInfo(subg_index)->backend();
+ auto shape_fixer = graph.backend_resolver()->getBackendContext(backend)->shape_fixer;
+ shape_fixer->fix(subg);
+ const auto tensor_register =
+ graph.backend_resolver()->getBackendContext(backend)->tensor_register;
+ tensor_register->registerTensors(subg, graph.getLowerInfo());
+ });
+
+ graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
const auto lower_info = graph.getLowerInfo(ind);
for (auto factor : lower_info->def_factors())
{
};
// TODO Remove this method and make `append` to get index value as an argument
- void setNextIndex(const model::SubgraphIndex next_index) { _next_index = next_index; }
+ void setNextIndex(const ir::SubgraphIndex next_index) { _next_index = next_index; }
exec::DataflowExecutor::CodeMap &&releaseCodeMap() { return std::move(_code_map); }
private:
- model::SubgraphIndex _next_index;
+ ir::SubgraphIndex _next_index;
exec::DataflowExecutor::CodeMap _code_map;
};
auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>();
// Generate kernels
- graph.subgraphs().iterate(
- [&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
- auto backend = graph.getLowerInfo(subg_index)->backend();
- auto constant_initializer =
- graph.backend_resolver()->getBackendContext(backend)->constant_initializer;
- constant_initializer->generate(subg, graph.operands());
- // TODO This approach is temporal. See declaration of `setNextIndex`.
- execution_builder->setNextIndex(subg_index);
- auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen;
- kernel_gen->generate(subg, execution_builder.get());
- });
+ graph.subgraphs().iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ auto backend = graph.getLowerInfo(subg_index)->backend();
+ auto constant_initializer =
+ graph.backend_resolver()->getBackendContext(backend)->constant_initializer;
+ constant_initializer->generate(subg, graph.operands());
+ // TODO This approach is temporary. See declaration of `setNextIndex`.
+ execution_builder->setNextIndex(subg_index);
+ auto kernel_gen = graph.backend_resolver()->getBackendContext(backend)->kernel_gen;
+ kernel_gen->generate(subg, execution_builder.get());
+ });
for (const auto &tensor_builder : tensor_builders)
{
// Wrap tensors as Object and store them to plan
for (auto &tensor_builder : tensor_builders)
{
- tensor_builder->iterate([&](const model::OperandIndex &index) {
+ tensor_builder->iterate([&](const ir::OperandIndex &index) {
auto object = tensor_builder->tensorAt(index);
operand_context->set(index, object);
});
* limitations under the License.
*/
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "compiler/HEScheduler.h"
#include "ir/Graph.h"
#include "util/ConfigSource.h"
broadcast, scheduling will select it since it doesn't distinguish broadcast and
non-broadcast like it does for quant non-quantized*/
if (backend->config()->id() == "cpu" &&
- (node.opcode() == model::OpCode::Add || node.opcode() == model::OpCode::Sub ||
- node.opcode() == model::OpCode::Mul))
+ (node.opcode() == ir::OpCode::Add || node.opcode() == ir::OpCode::Sub ||
+ node.opcode() == ir::OpCode::Mul))
{
const auto lhs_index{node.getInputs().at(model::operation::Add::Input::LHS)};
const auto rhs_index{node.getInputs().at(model::operation::Add::Input::RHS)};
Adding exception in stage doesn't help. Because if there is a record for Mul without
broadcast, scheduling will select it since it doesn't distinguish broadcast and
non-broadcast like it does for quant non-quantized*/
- else if (backend->config()->id() == "acl_neon" && node.opcode() == model::OpCode::Mul)
+ else if (backend->config()->id() == "acl_neon" && node.opcode() == ir::OpCode::Mul)
{
const auto lhs_index{node.getInputs().at(model::operation::Mul::Input::LHS)};
const auto rhs_index{node.getInputs().at(model::operation::Mul::Input::RHS)};
return true;
}
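
The broadcast screening described in the comments above has its body elided from these hunks; a plausible reconstruction, offered purely as an assumption, compares operand ranks:

    // Assumed shape of the elided check: differing ranks on a binary op's
    // inputs imply broadcasting, which the profiled records don't capture.
    const auto &lhs = _graph->operands().at(lhs_index);
    const auto &rhs = _graph->operands().at(rhs_index);
    const bool is_broadcast = lhs.shape().rank() != rhs.shape().rank();
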
-void HEScheduler::scheduleBranch(const model::OperationIndex &index,
- model::OperationIndexMap<bool> &scheduled)
+void HEScheduler::scheduleBranch(const ir::OperationIndex &index,
+ ir::OperationIndexMap<bool> &scheduled)
{
auto loc_index = index;
const backend::Backend *parent_backend = nullptr;
parent_backend = _backend_resolver->getBackend(loc_index);
const auto &node = _graph->operations().at(loc_index);
- model::OperandIndex tmp;
/* get the only output operand, the one that is the input of the next single operation
 * and just this node's output. */
if (node.getOutputs().size() != 1)
{
// Check if profiling info about all backend/node pairs already exists
bool all_nodes_are_profiled = true;
- _graph->operations().iterate([&](const model::OperationIndex &, const model::Operation &op) {
+ _graph->operations().iterate([&](const ir::OperationIndex &, const model::Operation &op) {
if (all_nodes_are_profiled)
all_nodes_are_profiled = isNodeProfiled(op);
});
}
}
- model::OperationIndexMap<bool> visited;
- graph.operations().iterate([&](const model::OperationIndex &index, const model::Operation &) {
- visited[index] = false;
- });
+ ir::OperationIndexMap<bool> visited;
+ graph.operations().iterate(
+ [&](const ir::OperationIndex &index, const model::Operation &) { visited[index] = false; });
// for each task select the backend with the smallest earliest finishing time(eft)
for (const auto &rank : _rank_to_op)
{
VERBOSE(HEScheduler::makeRank) << "task prioritizing" << std::endl;
_graph->operations().iterate(
- [&](const model::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); });
+ [&](const ir::OperationIndex &index, const model::Operation &) { DFSMaxRank(index); });
// Check that ranks are calculated for all operations(nodes)
- _graph->operations().iterate([&](const model::OperationIndex &index, const model::Operation &) {
+ _graph->operations().iterate([&](const ir::OperationIndex &index, const model::Operation &) {
UNUSED_RELEASE(index);
assert(_op_to_rank->find(index) != _op_to_rank->end());
});
VERBOSE(HEScheduler::makeRank) << "task prioritizing finished" << std::endl;
}
-int64_t HEScheduler::DFSMaxRank(const model::OperationIndex &index)
+int64_t HEScheduler::DFSMaxRank(const ir::OperationIndex &index)
{
auto op_to_rank_it = _op_to_rank->find(index);
if (op_to_rank_it != _op_to_rank->end())
return rank;
}
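DFSMaxRank above is the usual HEFT-style rank computation: a node's rank is its own cost plus the maximum rank among its children, memoized per node so each operation is visited once. A simplified sketch under assumed data layouts (plain integer ids and a hypothetical children adjacency list):
#include <algorithm>
#include <cstdint>
#include <unordered_map>
#include <vector>
int64_t dfsMaxRank(uint32_t index, const std::vector<std::vector<uint32_t>> &children,
                   const std::vector<int64_t> &exec_time,
                   std::unordered_map<uint32_t, int64_t> &op_to_rank)
{
  auto it = op_to_rank.find(index);
  if (it != op_to_rank.end())
    return it->second; // already ranked: reuse the memoized value
  int64_t max_child_rank = 0;
  for (auto child : children[index])
    max_child_rank = std::max(max_child_rank, dfsMaxRank(child, children, exec_time, op_to_rank));
  // Own cost plus the costliest downstream path; a higher rank means a more
  // critical task, so those are scheduled first.
  const int64_t rank = exec_time[index] + max_child_rank;
  op_to_rank[index] = rank;
  return rank;
}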
-int64_t HEScheduler::DFSChildrenMaxRank(const model::OperationIndex &index)
+int64_t HEScheduler::DFSChildrenMaxRank(const ir::OperationIndex &index)
{
const auto &node = _graph->operations().at(index);
int64_t max_child_rank = 0;
return prev_op_ft;
}
-bool HEScheduler::schedule(const model::OperationIndex &index,
- const backend::Backend *parent_backend)
+bool HEScheduler::schedule(const ir::OperationIndex &index, const backend::Backend *parent_backend)
{
VERBOSE(HEScheduler::schedule) << "scheduling (" << index.value() << ")" << std::endl;
int64_t eft = std::numeric_limits<int64_t>::max(), selected_exec_time = 0;
}
std::pair<int64_t, int64_t>
-HEScheduler::ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index,
+HEScheduler::ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index,
std::multimap<int64_t, int64_t> &transfer_st_exec_time)
{
const bool is_linear_exec = "Linear" == util::getConfigString(util::config::EXECUTOR);
#include "backend/ExecTime.h"
#include "backend/Backend.h"
#include "cpp14/memory.h"
-#include "model/OperationIndexMap.h"
+#include "ir/OperationIndexMap.h"
#include <map>
#include <memory>
* @param[in] model Graph model
* @param[in] backend_resolver backend resolver
*/
- HEScheduler(const neurun::model::Operands &operands,
- std::vector<const backend::Backend *> backends,
+ HEScheduler(const ir::Operands &operands, std::vector<const backend::Backend *> backends,
const std::shared_ptr<backend::custom::IKernelBuilder> &kb)
: _is_supported{}, _backends_avail_time{}, _ops_eft{},
- _op_to_rank{std::make_shared<model::OperationIndexMap<int64_t>>()},
+ _op_to_rank{std::make_shared<ir::OperationIndexMap<int64_t>>()},
_all_backends(std::move(backends))
{
_backend_resolver =
* https://www.hindawi.com/journals/sp/2016/3676149/
*/
std::unique_ptr<compiler::BackendResolver> schedule(const graph::Graph &graph) final;
- std::shared_ptr<model::OperationIndexMap<int64_t>> getIndexedRanks() { return _op_to_rank; }
+ std::shared_ptr<ir::OperationIndexMap<int64_t>> getIndexedRanks() { return _op_to_rank; }
private:
bool isNodeProfiled(const model::Operation &);
- bool schedule(const model::OperationIndex &, const backend::Backend *parent_backend);
+ bool schedule(const ir::OperationIndex &, const backend::Backend *parent_backend);
/**
* @brief Get earliest starting time and execution time of an operation on a backend.
*
* @return earliest starting time and execution time
*/
std::pair<int64_t, int64_t>
- ESTAndExecTime(const backend::Backend *backend, const model::OperationIndex &index,
+ ESTAndExecTime(const backend::Backend *backend, const ir::OperationIndex &index,
std::multimap<int64_t, int64_t> &transfer_st_exec_time);
/**
* @brief Returns the latest finishing time of parents of a node.
void makeRank();
- int64_t DFSMaxRank(const model::OperationIndex &index);
+ int64_t DFSMaxRank(const ir::OperationIndex &index);
- int64_t DFSChildrenMaxRank(const model::OperationIndex &index);
+ int64_t DFSChildrenMaxRank(const ir::OperationIndex &index);
/**
* @brief Returns the time, when backend is available for at least given amount of time.
*
*
* @return N/A
*/
- void scheduleBranch(const model::OperationIndex &index,
- model::OperationIndexMap<bool> &scheduled);
+ void scheduleBranch(const ir::OperationIndex &index, ir::OperationIndexMap<bool> &scheduled);
private:
// This variable stores backend/node pairs with unknown execution time, and hints the scheduler
std::unordered_map<const backend::Backend *, std::unordered_map<std::string, bool>> _is_supported;
// Finishing and starting time of each backend
std::unordered_map<const backend::Backend *, std::map<int64_t, int64_t>> _backends_avail_time;
- model::OperationIndexMap<int64_t> _ops_eft;
- std::multimap<int64_t, model::OperationIndex, std::greater<int64_t>> _rank_to_op;
- std::shared_ptr<model::OperationIndexMap<int64_t>> _op_to_rank;
+ ir::OperationIndexMap<int64_t> _ops_eft;
+ std::multimap<int64_t, ir::OperationIndex, std::greater<int64_t>> _rank_to_op;
+ std::shared_ptr<ir::OperationIndexMap<int64_t>> _op_to_rank;
std::unique_ptr<compiler::BackendResolver> _backend_resolver;
std::unique_ptr<backend::ExecTime> _exec_time;
const graph::Graph *_graph{nullptr};
// Get SubgraphSequence by topological sorting
{
model::Subgraphs &subgraphs = _graph.subgraphs();
- model::Operands &operands = _graph.operands();
+ ir::Operands &operands = _graph.operands();
// Subgraphs can't look up a subgraph by operand, so input_to_subgs provides that mapping
- std::unordered_map<model::OperandIndex, std::list<model::SubgraphIndex>> input_to_subgs;
+ std::unordered_map<ir::OperandIndex, std::list<ir::SubgraphIndex>> input_to_subgs;
// Get the relations between input/subgraph to be used for dfs-post-iter
//
// [SUBG3]
// |
// [4]
- subgraphs.iterate([&](const model::SubgraphIndex &subg_idx, model::Subgraph &subg) {
+ subgraphs.iterate([&](const ir::SubgraphIndex &subg_idx, model::Subgraph &subg) {
for (auto input : subg.getInputs())
{
// only valid_inputs
auto it = input_to_subgs.find(input);
if (it == input_to_subgs.end())
{
- std::list<model::SubgraphIndex> list{subg_idx};
+ std::list<ir::SubgraphIndex> list{subg_idx};
input_to_subgs[input] = list;
}
else
}
});
- std::unordered_map<model::SubgraphIndex, bool> visited;
- subgraphs.iterate([&](const model::SubgraphIndex &index, const model::Subgraph &) {
- visited[index] = false;
- });
+ std::unordered_map<ir::SubgraphIndex, bool> visited;
+ subgraphs.iterate(
+ [&](const ir::SubgraphIndex &index, const model::Subgraph &) { visited[index] = false; });
- std::function<void(const model::SubgraphIndex &, model::Subgraph &)> dfs_recursive =
- [&](const model::SubgraphIndex &index, model::Subgraph &subg) -> void {
+ std::function<void(const ir::SubgraphIndex &, model::Subgraph &)> dfs_recursive =
+ [&](const ir::SubgraphIndex &index, model::Subgraph &subg) -> void {
if (visited[index])
return;
visited[index] = true;
subgraphs.iterate(dfs_recursive);
// All of the nodes must have been visited.
- assert(
- std::all_of(visited.begin(), visited.end(),
- [](const std::pair<const model::SubgraphIndex, bool> &v) { return v.second; }));
+ assert(std::all_of(visited.begin(), visited.end(),
+ [](const std::pair<const ir::SubgraphIndex, bool> &v) { return v.second; }));
// NOTE Now these subgraphs are in reverse order
std::reverse(_elements.begin(), _elements.end());
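The dfs_recursive/std::reverse pair above is the classic post-order topological sort: a node is emitted only after everything reachable from it, and reversing the emission order yields a valid execution order. A self-contained sketch with plain integer node ids (illustrative, not the project's types):
#include <algorithm>
#include <cstdint>
#include <functional>
#include <vector>
std::vector<uint32_t> topologicalOrder(const std::vector<std::vector<uint32_t>> &succ)
{
  std::vector<uint32_t> order;
  std::vector<bool> visited(succ.size(), false);
  std::function<void(uint32_t)> dfs_recursive = [&](uint32_t n) {
    if (visited[n])
      return;
    visited[n] = true;
    for (auto s : succ[n])
      dfs_recursive(s);
    order.push_back(n); // post-order: all successors of n are already emitted
  };
  for (uint32_t n = 0; n < succ.size(); ++n)
    dfs_recursive(n);
  std::reverse(order.begin(), order.end()); // same trick Linear applies to _elements
  return order;
}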
void Linear::planTensors()
{
- model::OperandIndexMap<std::shared_ptr<backend::ITensorBuilder>> tensor_builder_map;
+ ir::OperandIndexMap<std::shared_ptr<backend::ITensorBuilder>> tensor_builder_map;
// NOTE
// While the current ITensorBuilder exposes registerSubTensorInfo for subtensors,
// this stage uses registerSubTensorInfo() and notify{First|Last}Use(),
// but subtensor handling should be done on each backend. See #5726.
- model::OperandIndexMap<uint32_t> uses_map;
- model::OperandIndexMap<uint32_t> def_map;
- model::OperandIndexSequence constants;
+ ir::OperandIndexMap<uint32_t> uses_map;
+ ir::OperandIndexMap<uint32_t> def_map;
+ ir::OperandIndexSequence constants;
iterate([&](const neurun::compiler::Linear::Element &element) {
const auto backend = element.lower_info->backend();
});
// Prepare scanning
- _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
const auto lower_info = _graph.getLowerInfo(ind);
// TODO Remove this once neurun no longer supports cases such as
// GeneratedTests.reshape_quant8_weights_as_inputs
}
}
- assert(std::all_of(
- uses_map.begin(), uses_map.end(),
- [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+ assert(
+ std::all_of(uses_map.begin(), uses_map.end(),
+ [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
- assert(std::all_of(
- def_map.begin(), def_map.end(),
- [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+ assert(
+ std::all_of(def_map.begin(), def_map.end(),
+ [](std::pair<const ir::OperandIndex, uint32_t> it) { return it.second == 0; }));
}
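The uses_map/def_map bookkeeping above is reference-count-based lifetime planning: each counter starts at the operand's total number of uses, each consuming operation decrements it, and the tensor can be released once it reaches zero (hence the final asserts). A minimal sketch of the counting scheme, with hypothetical types and notifyLastUse mentioned only in a comment:
#include <cassert>
#include <cstdint>
#include <unordered_map>
#include <vector>
// op_inputs[i] lists the operand ids read by operation i, in execution order;
// uses_map starts at each operand's total use count.
void planLifetimes(const std::vector<std::vector<uint32_t>> &op_inputs,
                   std::unordered_map<uint32_t, uint32_t> &uses_map)
{
  for (const auto &inputs : op_inputs)
    for (auto operand : inputs)
    {
      assert(uses_map[operand] > 0);
      if (--uses_map[operand] == 0)
      {
        // Last use reached: a real planner would call notifyLastUse(operand) here.
      }
    }
  // After the walk every counter must be zero, mirroring the asserts above.
}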
void Linear::iterate(const std::function<void(const Element &element)> &fn) const
*/
#include "ManualScheduler.h"
-#include "model/OpCode.h"
+#include "ir/OpCode.h"
#include "model/Operations.Include.h"
#include "backend/Backend.h"
#include "backend/BackendManager.h"
VERBOSE(ManualScheduler) << "Default backend for all ops: " << backend_all_str << std::endl;
- graph.operations().iterate([&](const model::OperationIndex &index, const model::Operation &) {
+ graph.operations().iterate([&](const ir::OperationIndex &index, const model::Operation &) {
backend_resolver->setBackend(index, backend_all);
});
// 2. Backend per operation type
- std::unordered_map<model::OpCode, backend::Backend *> op_type_map;
+ std::unordered_map<ir::OpCode, backend::Backend *> op_type_map;
// By default, Custom uses cpu backend
- op_type_map[model::OpCode::Custom] = backend::BackendManager::get().get("cpu");
+ op_type_map[ir::OpCode::Custom] = backend::BackendManager::get().get("cpu");
#define OP(InternalName) \
{ \
{ \
auto backend = backend::BackendManager::get().get(backend_str); \
VERBOSE(Lower) << "backend for " << #InternalName << ": " << backend_str << std::endl; \
- op_type_map[model::OpCode::InternalName] = backend; \
+ op_type_map[ir::OpCode::InternalName] = backend; \
} \
}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
graph.operations().iterate(
- [&](const model::OperationIndex &index, const model::Operation &operation) {
+ [&](const ir::OperationIndex &index, const model::Operation &operation) {
auto itr = op_type_map.find(operation.opcode());
if (itr != op_type_map.end())
{
const auto &val = key_val.at(1);
auto key = static_cast<uint32_t>(std::stoi(key_str));
- graph.operations().at(model::OperationIndex{key}); // Check if exist, or this wil throw
- backend_resolver->setBackend(model::OperationIndex{key},
+ graph.operations().at(ir::OperationIndex{key}); // Check if it exists, otherwise this will throw
+ backend_resolver->setBackend(ir::OperationIndex{key},
backend::BackendManager::get().get(val));
}
}
// 4. Operations that are specially handled
// All configuration above will be ignored (overwritten)
- op_type_map[model::OpCode::Permute] = backend::BackendManager::get().get("cpu");
+ op_type_map[ir::OpCode::Permute] = backend::BackendManager::get().get("cpu");
// Dump final assignment
backend_resolver->iterate(
- [&](const model::OperationIndex &index, const backend::BackendContext &backend_ctx) {
+ [&](const ir::OperationIndex &index, const backend::BackendContext &backend_ctx) {
VERBOSE(ManualScheduler) << "backend for operation #" << index.value() << ": "
<< backend_ctx.backend->config()->id() << std::endl;
});
namespace compiler
{
-OperandContext &OperandContext::set(const model::OperandIndex &id,
+OperandContext &OperandContext::set(const ir::OperandIndex &id,
const std::shared_ptr<backend::operand::ITensor> &tensor)
{
// Only one tensor for an id
}
void OperandContext::iterate(
- const std::function<void(const model::OperandIndex &, backend::operand::ITensor &)> &fn)
+ const std::function<void(const ir::OperandIndex &, backend::operand::ITensor &)> &fn)
{
for (auto &e : _tensors)
{
#define __NEURUN_COMPILER_OPERAND_CONTEXT_H__
#include "backend/operand/ITensor.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
#include <unordered_map>
#include <memory>
class OperandContext
{
public:
- OperandContext &set(const model::OperandIndex &ind,
+ OperandContext &set(const ir::OperandIndex &ind,
const std::shared_ptr<backend::operand::ITensor> &tensor);
public:
- bool exist(const ::neurun::model::OperandIndex &ind) const
- {
- return _tensors.find(ind) != _tensors.end();
- }
+ bool exist(const ir::OperandIndex &ind) const { return _tensors.find(ind) != _tensors.end(); }
public:
- std::shared_ptr<backend::operand::ITensor> at(const model::OperandIndex &ind) const
+ std::shared_ptr<backend::operand::ITensor> at(const ir::OperandIndex &ind) const
{
return _tensors.at(ind);
}
- std::shared_ptr<backend::operand::ITensor> &at(const model::OperandIndex &ind)
+ std::shared_ptr<backend::operand::ITensor> &at(const ir::OperandIndex &ind)
{
return _tensors.at(ind);
}
void
- iterate(const std::function<void(const model::OperandIndex &, backend::operand::ITensor &)> &fn);
+ iterate(const std::function<void(const ir::OperandIndex &, backend::operand::ITensor &)> &fn);
private:
- model::OperandIndexMap<std::shared_ptr<backend::operand::ITensor>> _tensors;
+ ir::OperandIndexMap<std::shared_ptr<backend::operand::ITensor>> _tensors;
};
} // namespace compiler
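OperandContext is essentially a registry from operand index to backend tensor. A hypothetical stand-in showing the API shape, with SimpleTensor replacing backend::operand::ITensor:
#include <cstdint>
#include <memory>
#include <unordered_map>
#include <utility>
struct SimpleTensor
{
  // stand-in for backend::operand::ITensor: buffer, shape, layout, ...
};
class Context
{
public:
  Context &set(uint32_t ind, std::shared_ptr<SimpleTensor> tensor)
  {
    _tensors.emplace(ind, std::move(tensor)); // only one tensor per index
    return *this;                             // chaining, as in OperandContext::set
  }
  bool exist(uint32_t ind) const { return _tensors.find(ind) != _tensors.end(); }
private:
  std::unordered_map<uint32_t, std::shared_ptr<SimpleTensor>> _tensors;
};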
#include <typeinfo>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "ir/operation/LowerInfo.h"
#include "util/logging.h"
namespace neurun
{
-namespace model
+namespace ir
{
class Operands;
-} // namespace model
+} // namespace ir
} // namespace neurun
namespace neurun
class OperationValidator : public model::OperationVisitor
{
public:
- OperationValidator(const neurun::model::Operands &ctx)
- : _ctx{ctx}, _current_subg_layout{ir::Layout::UNKNOWN}
+ OperationValidator(const ir::Operands &ctx) : _ctx{ctx}, _current_subg_layout{ir::Layout::UNKNOWN}
{
}
void visit(const model::operation::Pad &node) override;
private:
- const neurun::model::Operands &_ctx;
+ const ir::Operands &_ctx;
ir::Layout _current_subg_layout;
};
void ParamChecker::operator()()
{
_model->operations().iterate(
- [&](const model::OperationIndex &, const model::Operation &node) { node.accept(*this); });
+ [&](const ir::OperationIndex &, const model::Operation &node) { node.accept(*this); });
}
} // namespace compiler
#include <typeinfo>
#include "cpp14/memory.h"
-#include "model/OperandIndexSequence.h"
+#include "ir/OperandIndexSequence.h"
#include "util/logging.h"
#include "util/Coordinates.h"
* @brief Construct a new SubTensorAnalyzer object
* @param[in] ctx Graph operand set
*/
- SubTensorAnalyzer(neurun::model::Operands &ctx) : _ctx{ctx}
+ SubTensorAnalyzer(ir::Operands &ctx) : _ctx{ctx}
{
// DO NOTHING
}
void visit(const model::operation::Concat &) override;
private:
- neurun::model::Operands &_ctx; // TODO Refactor : Do not update Operands
+ ir::Operands &_ctx; // TODO Refactor : Do not update Operands
};
} // namespace compiler
#include <sstream>
-#include "model/Index.h"
+#include "ir/Index.h"
#include "model/Operation.h"
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "OperationNode.h"
#include "OperandNode.h"
#include "DotSubgraphInfo.h"
using Operation = neurun::model::Operation;
-using Object = neurun::model::Operand;
+using Object = neurun::ir::Operand;
namespace neurun
{
#include "DotBuilder.h"
#include "DotSubgraphInfo.h"
#include "model/Subgraph.h"
-#include "model/OperationIndexMap.h"
+#include "ir/OperationIndexMap.h"
#include "backend/Backend.h"
#include "backend/BackendManager.h"
#include "backend/IConfig.h"
auto &operations = _graph.operations();
auto &operands = _graph.operands();
- model::OperationIndexMap<std::unique_ptr<Operation>> operation_nodes;
- std::unordered_map<model::OperandIndex, std::unique_ptr<Operand>> operand_nodes;
+ ir::OperationIndexMap<std::unique_ptr<Operation>> operation_nodes;
+ std::unordered_map<ir::OperandIndex, std::unique_ptr<Operand>> operand_nodes;
- operations.iterate([&](const model::OperationIndex &index, const model::Operation &op) {
+ operations.iterate([&](const ir::OperationIndex &index, const model::Operation &op) {
auto node = nnfw::cpp14::make_unique<Operation>(index, op);
for (auto output : op.getOutputs())
}
};
- util::Set<model::OperandIndex> shown_operand_set;
+ util::Set<ir::OperandIndex> shown_operand_set;
- operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) {
+ operands.iterate([&](const ir::OperandIndex &index, const ir::Operand &object) {
bool showing_cond = false;
if (_level == Level::ALL)
{
const auto subgraphs = _graph.subgraphs();
if (subgraphs)
{
- subgraphs->iterate([&](const model::SubgraphIndex &index, const model::Subgraph &subgraph) {
+ subgraphs->iterate([&](const ir::SubgraphIndex &index, const model::Subgraph &subgraph) {
const auto lower_info = _graph.getLowerInfo(index);
auto fillcolor = backend_to_fillcolor(lower_info->backend());
std::string label =
namespace dot
{
-DotSubgraphInfo::DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph,
- const util::Set<model::OperandIndex> &shown_operands)
+DotSubgraphInfo::DotSubgraphInfo(const ir::SubgraphIndex &index, const model::Subgraph &subgraph,
+ const util::Set<ir::OperandIndex> &shown_operands)
: _index{index}
{
for (const auto &element : subgraph.operations())
#include <unordered_set>
-#include "model/Index.h"
+#include "ir/Index.h"
#include "model/Subgraph.h"
#include "util/Set.h"
class DotSubgraphInfo
{
public:
- DotSubgraphInfo(const model::SubgraphIndex &index, const model::Subgraph &subgraph,
- const util::Set<model::OperandIndex> &shown_operands);
+ DotSubgraphInfo(const ir::SubgraphIndex &index, const model::Subgraph &subgraph,
+ const util::Set<ir::OperandIndex> &shown_operands);
- model::SubgraphIndex index() const { return _index; }
+ ir::SubgraphIndex index() const { return _index; }
std::string label() const { return _label; }
void label(const std::string &val) { _label = val; }
std::string fillcolor() const { return _fillcolor; }
void fillcolor(const std::string &val) { _fillcolor = val; }
- const std::unordered_set<model::OperationIndex> &operations() const { return _operations; }
- const std::unordered_set<model::OperandIndex> &operands() const { return _operands; }
+ const std::unordered_set<ir::OperationIndex> &operations() const { return _operations; }
+ const std::unordered_set<ir::OperandIndex> &operands() const { return _operands; }
private:
- model::SubgraphIndex _index;
+ ir::SubgraphIndex _index;
std::string _label;
std::string _fillcolor;
- std::unordered_set<model::OperationIndex> _operations;
- std::unordered_set<model::OperandIndex> _operands;
+ std::unordered_set<ir::OperationIndex> _operations;
+ std::unordered_set<ir::OperandIndex> _operands;
};
} // namespace dot
const std::string Operand::OPERAND_SHAPE = "ellipse";
const std::string Operand::BG_COLOR_SCHEME = "set18";
-Operand::Operand(const neurun::model::OperandIndex &index, Type type)
+Operand::Operand(const ir::OperandIndex &index, Type type)
: Node{"operand" + std::to_string(index.value())}
{
{
#include <vector>
#include "Node.h"
-#include "model/Operand.h"
-#include "model/Index.h"
+#include "ir/Operand.h"
+#include "ir/Index.h"
namespace neurun
{
* @param[in] type Operand type
* @param[in] lower_info Operand LowerInfo
*/
- Operand(const neurun::model::OperandIndex &index, Type type);
+ Operand(const ir::OperandIndex &index, Type type);
private:
void addBackendLabel();
const std::string Operation::OPERATION_SHAPE = "rect";
const std::string Operation::BG_COLOR_SCHEME = "pastel18";
-Operation::Operation(const neurun::model::OperationIndex &index,
- const neurun::model::Operation &node)
+Operation::Operation(const ir::OperationIndex &index, const neurun::model::Operation &node)
: Node{"operation" + std::to_string(index.value())}
{
setAttribute("label", std::to_string(index.value()) + " : " + node.name());
#include "Node.h"
#include "model/Operation.h"
-#include "model/Index.h"
+#include "ir/Index.h"
namespace neurun
{
* @param[in] index operation index
* @param[in] node operation object
*/
- Operation(const neurun::model::OperationIndex &index, const neurun::model::Operation &node);
+ Operation(const ir::OperationIndex &index, const neurun::model::Operation &node);
};
} // namespace dot
auto it = _indexed_ranks->find(element.index);
if (it == _indexed_ranks->end())
{
- assert(element.node->opcode() == model::OpCode::Permute);
+ assert(element.node->opcode() == ir::OpCode::Permute);
// assign int32_t::max to prevent integer overflow
rank += std::numeric_limits<int32_t>::max();
}
const model::Subgraphs *subgraphs = _graph.subgraphs();
// Assign jobs: convert SubgraphIndex to job index (uint32_t)
uint32_t next_job_index = 0;
- std::unordered_map<model::SubgraphIndex, uint32_t> subgraph_to_job;
- subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &) {
+ std::unordered_map<ir::SubgraphIndex, uint32_t> subgraph_to_job;
+ subgraphs->iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &) {
VERBOSE(DataflowExecutor) << "Create a job #" << next_job_index << " with SubgraphIndex "
<< subg_index.value() << std::endl;
_finished_jobs.emplace_back(
_output_info.resize(next_job_index);
_initial_input_info.resize(next_job_index, 0);
- subgraphs->iterate([&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
+ subgraphs->iterate([&](const ir::SubgraphIndex &subg_index, const model::Subgraph &subg) {
auto job_index = subgraph_to_job[subg_index];
for (auto output : subg.getOutputs())
{
// Update output and input info
subgraphs->iterate(
- [&](const model::SubgraphIndex &subg_cur_index, const model::Subgraph &subg_cur) {
+ [&](const ir::SubgraphIndex &subg_cur_index, const model::Subgraph &subg_cur) {
if (subg_cur.getInputs().contains(output))
{
auto dep_index = subgraph_to_job[subg_cur_index];
#include "FunctionSequence.h"
#include "Job.h"
-#include "model/OperandIndexSequence.h"
-#include "model/Index.h"
+#include "ir/OperandIndexSequence.h"
+#include "ir/Index.h"
#include "cpp14/memory.h"
#include "exec/ExecutorBase.h"
class DataflowExecutor : public ExecutorBase
{
public:
- using CodeMap = std::unordered_map<model::SubgraphIndex, std::unique_ptr<FunctionSequence>>;
+ using CodeMap = std::unordered_map<ir::SubgraphIndex, std::unique_ptr<FunctionSequence>>;
protected:
virtual void notify(uint32_t finished_job_id);
std::multimap<int64_t, std::unique_ptr<Job>, std::greater<int64_t>> _ready_jobs;
/// @brief Which job runs which op and function.
- std::unordered_map<uint32_t, model::SubgraphIndex> _job_to_subgraph;
+ std::unordered_map<uint32_t, ir::SubgraphIndex> _job_to_subgraph;
};
} // namespace exec
}
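DataflowExecutor's job bookkeeping above amounts to a ready-queue driven by per-job input counters: a job becomes runnable once its last pending input is produced. A compact sketch with plain integer job ids (an assumed layout, not the actual Job class):
#include <cstdint>
#include <queue>
#include <vector>
// pending_inputs[j] counts job j's unfinished inputs; deps[j] lists the jobs
// that receive one more finished input when j completes.
void runDataflow(std::vector<uint32_t> pending_inputs,
                 const std::vector<std::vector<uint32_t>> &deps)
{
  std::queue<uint32_t> ready;
  for (uint32_t j = 0; j < pending_inputs.size(); ++j)
    if (pending_inputs[j] == 0)
      ready.push(j); // jobs with no inputs can start immediately
  while (!ready.empty())
  {
    const auto job = ready.front();
    ready.pop();
    // ... run this job's FunctionSequence here ...
    for (auto dep : deps[job])
      if (--pending_inputs[dep] == 0)
        ready.push(dep); // like notify(): the last missing input just arrived
  }
}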
// TODO Remove default parameter
-void Execution::setInput(const model::IOIndex &index, const void *buffer, size_t length,
+void Execution::setInput(const ir::IOIndex &index, const void *buffer, size_t length,
ir::Layout layout)
{
const auto input_index = graph().getInputs().at(index);
}
// TODO Remove default parameter
-void Execution::setInput(const model::IOIndex &index, const ir::TypeInfo &type,
- const ir::Shape &shape, const void *buffer, size_t length,
- ir::Layout layout)
+void Execution::setInput(const ir::IOIndex &index, const ir::TypeInfo &type, const ir::Shape &shape,
+ const void *buffer, size_t length, ir::Layout layout)
{
- const model::OperandInfo info{shape, type};
+ const ir::OperandInfo info{shape, type};
if (length < info.total_size())
{
}
// TODO Remove default parameter
-void Execution::setOutput(const model::IOIndex &index, void *buffer, size_t length,
- ir::Layout layout)
+void Execution::setOutput(const ir::IOIndex &index, void *buffer, size_t length, ir::Layout layout)
{
const auto output_index = graph().getOutputs().at(index);
const auto info = graph().operands().at(output_index).info();
}
// TODO Remove default parameter
-void Execution::setOutput(const model::IOIndex &index, const ir::TypeInfo &type,
+void Execution::setOutput(const ir::IOIndex &index, const ir::TypeInfo &type,
const ir::Shape &shape, void *buffer, size_t length, ir::Layout layout)
{
- const model::OperandInfo info{shape, type};
+ const ir::OperandInfo info{shape, type};
if (length < info.total_size())
{
nnfw::cpp14::make_unique<OutputDesc>(info, buffer, length, layout);
}
-void Execution::setInputLayout(const model::IOIndex &index, ir::Layout layout)
+void Execution::setInputLayout(const ir::IOIndex &index, ir::Layout layout)
{
const auto &input_desc = _io_desc.inputs.at(index.value());
_io_desc.inputs.at(index.value()) = nnfw::cpp14::make_unique<InputDesc>(
input_desc->info, input_desc->buffer, input_desc->size, layout);
}
-void Execution::setOutputLayout(const model::IOIndex &index, ir::Layout layout)
+void Execution::setOutputLayout(const ir::IOIndex &index, ir::Layout layout)
{
const auto &output_desc = _io_desc.outputs.at(index.value());
_io_desc.outputs.at(index.value()) = nnfw::cpp14::make_unique<OutputDesc>(
// DO NOTHING
}
-std::unique_ptr<ISource> ExecutorBase::source(const model::IOIndex &index, const ir::TypeInfo &type,
+std::unique_ptr<ISource> ExecutorBase::source(const ir::IOIndex &index, const ir::TypeInfo &type,
const void *buffer, size_t length,
ir::Layout io_layout)
{
}
}
-std::unique_ptr<ISink> ExecutorBase::sink(const model::IOIndex &index, const ir::TypeInfo &type,
+std::unique_ptr<ISink> ExecutorBase::sink(const ir::IOIndex &index, const ir::TypeInfo &type,
void *buffer, size_t length, ir::Layout io_layout)
{
using ir::DataType;
// Set input(s)
for (uint32_t n = 0; n < _graph.getInputs().size(); ++n)
{
- model::IOIndex input_index{n};
- model::OperandIndex index{_graph.getInputs().at(input_index)};
+ ir::IOIndex input_index{n};
+ ir::OperandIndex index{_graph.getInputs().at(input_index)};
if (desc.inputs.at(n) == nullptr)
{
// Get output(s)
for (uint32_t n = 0; n < _graph.getOutputs().size(); ++n)
{
- neurun::model::IOIndex output_index{n};
+ ir::IOIndex output_index{n};
// Optional output
if (desc.outputs.at(n) == nullptr)
{
auto getter = [&](::neurun::backend::operand::ITensor &tensor) { sinks.at(n)->pull(tensor); };
- ::neurun::model::OperandIndex index{_graph.getOutputs().at(output_index)};
+ ir::OperandIndex index{_graph.getOutputs().at(output_index)};
auto object = _operand_context->at(index);
object->access(getter);
void execute(const IODescription &desc) final;
// Used only in Dataflow and Parallel Executors
- void setIndexedRanks(std::shared_ptr<model::OperationIndexMap<int64_t>> ranks) final
+ void setIndexedRanks(std::shared_ptr<ir::OperationIndexMap<int64_t>> ranks) final
{
_indexed_ranks = std::move(ranks);
};
void addObserver(std::unique_ptr<IExecutionObserver> ref) { _subject.add(std::move(ref)); };
private:
- std::unique_ptr<ISource> source(const model::IOIndex &index, const ir::TypeInfo &type,
+ std::unique_ptr<ISource> source(const ir::IOIndex &index, const ir::TypeInfo &type,
const void *buffer, size_t length, ir::Layout io_layout);
- std::unique_ptr<ISink> sink(const model::IOIndex &index, const ir::TypeInfo &type, void *buffer,
+ std::unique_ptr<ISink> sink(const ir::IOIndex &index, const ir::TypeInfo &type, void *buffer,
size_t length, ir::Layout io_layout);
template <typename T>
- std::unique_ptr<ISource> source(const model::IOIndex &index, const void *buffer, size_t length,
+ std::unique_ptr<ISource> source(const ir::IOIndex &index, const void *buffer, size_t length,
ir::Layout io_layout)
{
const auto operand_index = _graph.getInputs().at(index);
}
template <typename T>
- std::unique_ptr<ISink> sink(const model::IOIndex &index, void *buffer, size_t length,
+ std::unique_ptr<ISink> sink(const ir::IOIndex &index, void *buffer, size_t length,
ir::Layout io_layout)
{
const auto operand_index = _graph.getOutputs().at(index);
protected:
ExecutionObservee _subject;
- std::shared_ptr<model::OperationIndexMap<int64_t>> _indexed_ranks;
+ std::shared_ptr<ir::OperationIndexMap<int64_t>> _indexed_ranks;
const graph::Graph &_graph;
std::shared_ptr<compiler::OperandContext> _operand_context;
std::unique_ptr<backend::TensorManagerSet> _tensor_mgrs;
#include <unordered_set>
#include "exec/IFunction.h"
-#include "model/Index.h"
-#include "model/OperandIndexSequence.h"
+#include "ir/Index.h"
+#include "ir/OperandIndexSequence.h"
#include "backend/Backend.h"
namespace neurun
#include "FunctionSequence.h"
#include "Job.h"
-#include "model/OperandIndexSequence.h"
-#include "model/Index.h"
+#include "ir/OperandIndexSequence.h"
+#include "ir/Index.h"
#include "cpp14/memory.h"
#include "exec/DataflowExecutor.h"
#include "ParallelScheduler.h"
#include <cpp14/memory.h>
-#include "model/Data.h"
+#include "ir/Data.h"
namespace neurun
{
/**
* @brief Interface for writable data area
*/
-class Buffer : public model::Data
+class Buffer : public ir::Data
{
public:
/**
* @param[in] index Tensor index
* @param[in] tensor Tensor
*/
- void assignTensor(const model::OperandIndex index, std::shared_ptr<ITensor> tensor)
+ void assignTensor(const ir::OperandIndex index, std::shared_ptr<ITensor> tensor)
{
assert(tensor->bufferRO() != nullptr);
_tensors.emplace(index, tensor);
* @param[in] index Tensor index
* @return Tensor pointer
*/
- const ITensor *tensorAt(const model::OperandIndex index) const
- {
- return _tensors.at(index).get();
- }
+ const ITensor *tensorAt(const ir::OperandIndex index) const { return _tensors.at(index).get(); }
/**
* @brief Check whether the environment contains a tensor
* @param[in] index Tensor index
* @return @c true if the environment contains the tensor, otherwise @c false
*/
- bool contains(const model::OperandIndex index) const
+ bool contains(const ir::OperandIndex index) const
{
return (_tensors.find(index) != _tensors.end());
}
* @note If already allocated, just return
* @todo Smarter allocation policy
*/
- void allocateIfNeeded(const model::OperandIndex index, const model::OperandInfo &info)
+ void allocateIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info)
{
// already allocated, or constant
if (contains(index))
* @param[in] info Operand info
* @param[in] index_to_share Tensor index that have data to share
*/
- void allocateAndShareIfNeeded(const model::OperandIndex index, const model::OperandInfo &info,
- const model::OperandIndex index_to_share)
+ void allocateAndShareIfNeeded(const ir::OperandIndex index, const ir::OperandInfo &info,
+ const ir::OperandIndex index_to_share)
{
if (!contains(index_to_share))
{
* @param[in] index Tensor index
* @note If allocated externally, just return
*/
- void freeIfAllocated(const model::OperandIndex index)
+ void freeIfAllocated(const ir::OperandIndex index)
{
if (_buffers.find(index) != _buffers.end())
{
const graph::Graph &_graph;
// Tensor map used by the interpreter
// It should map tensors that have an allocated or assigned buffer pointer
- std::unordered_map<model::OperandIndex, std::shared_ptr<ITensor>> _tensors;
+ std::unordered_map<ir::OperandIndex, std::shared_ptr<ITensor>> _tensors;
// Tensors allocated by allocateIfNeeded (buffer)
- std::unordered_set<model::OperandIndex> _buffers;
+ std::unordered_set<ir::OperandIndex> _buffers;
};
} // namespace interp
It may execute a divided model,
but for now assume the whole model inference is done by the interpreter
***********************************************************************/
- model::OperandIndexMap<std::shared_ptr<ITensor>> tensor_map;
+ ir::OperandIndexMap<std::shared_ptr<ITensor>> tensor_map;
for (uint32_t n = 0; n < _graph.getInputs().size(); n++)
{
- neurun::model::IOIndex index{n};
+ ir::IOIndex index{n};
const auto input_index = _graph.getInputs().at(index);
const auto &input = *desc.inputs.at(n);
auto input_tensor = std::make_shared<ROTensor>(input.info);
- input_tensor->setData(std::make_shared<const model::ExternalData>(
+ input_tensor->setData(std::make_shared<const ir::ExternalData>(
reinterpret_cast<const uint8_t *>(input.buffer), input.size));
tensor_map[input_index] = input_tensor;
}
for (uint32_t n = 0; n < _graph.getOutputs().size(); n++)
{
- neurun::model::IOIndex index{n};
+ ir::IOIndex index{n};
const auto output_index = _graph.getOutputs().at(index);
const auto &output = *desc.outputs.at(n);
}
// Allocate constant tensor
- _graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ _graph.operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
if (obj.isConstant())
{
VERBOSE(INTERPRETER) << "Allocate and assign constant tensor. operand index:" << ind.value()
auto const_tensor = std::make_shared<ROTensor>(obj.info());
// Assume that the interpreter's tensor layout is the same as the model's (NHWC)
const_tensor->setData(
- std::make_shared<model::ExternalData>(obj.data().base(), obj.info().total_size()));
+ std::make_shared<ir::ExternalData>(obj.data().base(), obj.info().total_size()));
interp_env->assignTensor(ind, const_tensor);
}
});
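ExternalData, as used above for both user buffers and constant operands, is a non-owning read-only view over existing memory, which is what lets the interpreter bind caller buffers without copying. A sketch of that idea, simplified from ir::Data with illustrative names:
#include <cstddef>
#include <cstdint>
class Data
{
public:
  virtual ~Data() = default;
  virtual const uint8_t *base() const = 0;
  virtual size_t size() const = 0;
};
class ExternalData final : public Data
{
public:
  ExternalData(const uint8_t *base, size_t size) : _base{base}, _size{size} {}
  const uint8_t *base() const override { return _base; } // no ownership taken
  size_t size() const override { return _size; }
private:
  const uint8_t *_base;
  size_t _size;
};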
* @return Graph object
*/
const graph::Graph &graph() final { return _graph; }
- void setIndexedRanks(std::shared_ptr<model::OperationIndexMap<int64_t>>) override{
+ void setIndexedRanks(std::shared_ptr<ir::OperationIndexMap<int64_t>>) override{
// Not implemented
};
/**
private:
const graph::Graph &_graph;
- model::OperandIndexMap<std::shared_ptr<ITensor>> _tensor_map;
+ ir::OperandIndexMap<std::shared_ptr<ITensor>> _tensor_map;
};
} // namespace interp
#include "Registration.h"
-#include "model/OperandIndexMap.h"
+#include "ir/OperandIndexMap.h"
#include "util/logging.h"
#include "model/OperationVisitor.h"
public:
OperationExecutor(ExecEnv *env) : _env{env}
{
- _kernels[model::OpCode::Add] = getAdd();
- _kernels[model::OpCode::Conv2D] = getConv2D();
- _kernels[model::OpCode::MaxPool2D] = getMaxPool2D();
- _kernels[model::OpCode::Concat] = getConcat();
- _kernels[model::OpCode::AvgPool2D] = getAvgPool2D();
- _kernels[model::OpCode::FullyConnected] = getFullyConnected();
- _kernels[model::OpCode::Softmax] = getSoftMax();
- _kernels[model::OpCode::Reshape] = getReshape();
- _kernels[model::OpCode::DepthwiseConv2D] = getDepthwiseConv();
+ _kernels[ir::OpCode::Add] = getAdd();
+ _kernels[ir::OpCode::Conv2D] = getConv2D();
+ _kernels[ir::OpCode::MaxPool2D] = getMaxPool2D();
+ _kernels[ir::OpCode::Concat] = getConcat();
+ _kernels[ir::OpCode::AvgPool2D] = getAvgPool2D();
+ _kernels[ir::OpCode::FullyConnected] = getFullyConnected();
+ _kernels[ir::OpCode::Softmax] = getSoftMax();
+ _kernels[ir::OpCode::Reshape] = getReshape();
+ _kernels[ir::OpCode::DepthwiseConv2D] = getDepthwiseConv();
}
- void execute(const model::OperationIndex &idx)
+ void execute(const ir::OperationIndex &idx)
{
const auto nodeName = _env->graph().operations().at(idx).name();
VERBOSE(INTERPRETER) << "Prepare output operands and execute " << nodeName
#define OP(InternalName) \
virtual void visit(const model::operation::InternalName &node) override \
{ \
- if (_kernels[model::OpCode::InternalName]->prepare != nullptr) \
+ if (_kernels[ir::OpCode::InternalName]->prepare != nullptr) \
{ \
- _kernels[model::OpCode::InternalName]->prepare(_env, node); \
+ _kernels[ir::OpCode::InternalName]->prepare(_env, node); \
} \
- _kernels[model::OpCode::InternalName]->invoke(_env, node); \
+ _kernels[ir::OpCode::InternalName]->invoke(_env, node); \
}
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
private:
ExecEnv *_env;
- std::unordered_map<model::OpCode, OpKernel *> _kernels;
+ std::unordered_map<ir::OpCode, OpKernel *> _kernels;
};
void Interpreter::run()
VERBOSE(INTERPRETER) << "Interpreter is invoked " << std::endl;
// operand_stack: holds operands that are ready to use
- std::stack<model::OperandIndex> operand_stack;
+ std::stack<ir::OperandIndex> operand_stack;
// Note: We should push inputs first, then constants.
// We use use-def chains to find operators ready for execution,
operand_stack.push(ind);
}
- _env->graph().operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
+ _env->graph().operands().iterate([&](const ir::OperandIndex &ind, const ir::Operand &obj) {
if (obj.isConstant())
{
VERBOSE(INTERPRETER) << "Constant: Push to operand stack " << ind.value() << std::endl;
});
// Execution
- std::unordered_set<model::OperandIndex> ready_check;
- std::unordered_set<model::OperationIndex> executed;
+ std::unordered_set<ir::OperandIndex> ready_check;
+ std::unordered_set<ir::OperationIndex> executed;
OperationExecutor executor{_env.get()};
while (!operand_stack.empty())
{
ready_check.insert(current_operand_index);
// Find prepared operations by scanning the uses of the current operand
- std::stack<model::OperationIndex> operation_stack;
+ std::stack<ir::OperationIndex> operation_stack;
const auto use_operators = _env->graph().operands().at(current_operand_index).getUses();
for (auto use_operator : use_operators.list())
{
#include "Buffer.h"
-#include "model/OperandInfo.h"
+#include "ir/OperandInfo.h"
#include "backend/operand/ITensor.h"
#include "ir/Layout.h"
* @brief Return shared pointer for data
* @return Data shared pointer
*/
- virtual std::shared_ptr<const model::Data> shareData() const = 0;
+ virtual std::shared_ptr<const ir::Data> shareData() const = 0;
/**
* @brief Set internal/external buffer
* @param[in] buffer Buffer pointer
* @brief Set data reference (including constant, input)
* @param[in] data Data pointer
*/
- virtual void setData(std::shared_ptr<const model::Data> data) = 0;
+ virtual void setData(std::shared_ptr<const ir::Data> data) = 0;
virtual void releaseData() = 0;
virtual size_t total_size() const = 0;
* @brief Return TensorInfo
* @return TensorInfo
*/
- virtual const model::OperandInfo &tensorInfo() const = 0;
+ virtual const ir::OperandInfo &tensorInfo() const = 0;
/**
* @brief Return number of elements
* @return Number of elements
{
public:
ROTensor() = delete;
- ROTensor(const model::OperandInfo &info) : _info(info)
+ ROTensor(const ir::OperandInfo &info) : _info(info)
{
// DO NOTHING
}
throw std::runtime_error{"Read only tensor"};
}
const uint8_t *bufferRO() const override { return _data->base(); }
- std::shared_ptr<const model::Data> shareData() const override { return _data; }
+ std::shared_ptr<const ir::Data> shareData() const override { return _data; }
void setBuffer(std::shared_ptr<const Buffer> buffer) override { _data = buffer; }
- void setData(std::shared_ptr<const model::Data> data) override { _data = data; }
+ void setData(std::shared_ptr<const ir::Data> data) override { _data = data; }
void releaseData() override { _data = nullptr; }
size_t total_size() const override { return _info.total_size(); }
ir::Layout layout() const override;
bool has_padding() const override { return false; }
ir::DataType data_type() const override { return _info.typeInfo().type(); }
- const model::OperandInfo &tensorInfo() const override { return _info; }
+ const ir::OperandInfo &tensorInfo() const override { return _info; }
uint64_t num_elements() const override { return _info.shape().num_elements(); };
private:
- const model::OperandInfo _info;
- std::shared_ptr<const model::Data> _data{nullptr};
+ const ir::OperandInfo _info;
+ std::shared_ptr<const ir::Data> _data{nullptr};
};
/**
{
public:
Tensor() = delete;
- Tensor(const model::OperandInfo &info) : _info(info)
+ Tensor(const ir::OperandInfo &info) : _info(info)
{
// DO NOTHING
}
uint8_t *buffer() const override { return _buffer->baseWritable(); }
std::shared_ptr<const Buffer> shareBuffer() const override { return _buffer; };
const uint8_t *bufferRO() const override { return _buffer->base(); }
- std::shared_ptr<const model::Data> shareData() const override { return _buffer; }
+ std::shared_ptr<const ir::Data> shareData() const override { return _buffer; }
void setBuffer(std::shared_ptr<const Buffer> buffer) override { _buffer = buffer; }
- void setData(std::shared_ptr<const model::Data>) override
+ void setData(std::shared_ptr<const ir::Data>) override
{
throw std::runtime_error{"Passed data may read-only"};
}
ir::Layout layout() const override;
bool has_padding() const override { return false; }
ir::DataType data_type() const override { return _info.typeInfo().type(); }
- const model::OperandInfo &tensorInfo() const override { return _info; }
+ const ir::OperandInfo &tensorInfo() const override { return _info; }
uint64_t num_elements() const override { return _info.shape().num_elements(); };
private:
- const model::OperandInfo _info;
+ const ir::OperandInfo _info;
std::shared_ptr<const Buffer> _buffer{nullptr};
};
auto out_shape = first_tensor->tensorInfo().shape();
out_shape.dim(axis) = out_axis_dimension;
env->allocateIfNeeded(out_index,
- model::OperandInfo{out_shape, first_tensor->tensorInfo().typeInfo()});
+ ir::OperandInfo{out_shape, first_tensor->tensorInfo().typeInfo()});
auto out_tensor = env->tensorAt(out_index);
UNUSED_RELEASE(out_tensor);
ir::Shape output_shape(2);
output_shape.dim(0) = batch_size;
output_shape.dim(1) = num_units;
- const model::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()};
+ const ir::OperandInfo out_info{output_shape, in_tensor->tensorInfo().typeInfo()};
env->allocateIfNeeded(out_index, out_info);
auto out_tensor = env->tensorAt(out_index);
const auto output_shape = env->graph().operands().at(in_index).info().shape();
const auto output_type = env->graph().operands().at(out_index).info().typeInfo();
- const model::OperandInfo output_info{output_shape, output_type};
+ const ir::OperandInfo output_info{output_shape, output_type};
env->allocateIfNeeded(out_index, output_info);
auto out_tensor = env->tensorAt(out_index);
Graph::~Graph(void) = default;
-model::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type)
+ir::OperandIndex Graph::addOperand(const ir::Shape &shape, const ir::TypeInfo &type)
{
return _operands.emplace(shape, type);
}
-model::OperationIndex Graph::addOperation(std::unique_ptr<model::Operation> &&node)
+ir::OperationIndex Graph::addOperation(std::unique_ptr<model::Operation> &&node)
{
assert(isBuildingPhase());
return _operations.push(std::move(node));
}
-void Graph::setOperandValue(const model::OperandIndex &ind, std::unique_ptr<model::Data> &&data)
+void Graph::setOperandValue(const ir::OperandIndex &ind, std::unique_ptr<ir::Data> &&data)
{
assert(isBuildingPhase());
assert(_operands.exist(ind));
_operands.at(ind).data(std::move(data));
}
-void Graph::addInput(const model::OperandIndex &ind)
+void Graph::addInput(const ir::OperandIndex &ind)
{
assert(isBuildingPhase());
_inputs.append(ind);
}
-void Graph::addOutput(const model::OperandIndex &ind)
+void Graph::addOutput(const ir::OperandIndex &ind)
{
assert(isBuildingPhase());
_outputs.append(ind);
// Lower
{
// operand::LowerInfo holder
- model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operands_lower_info;
+ ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> operands_lower_info;
- _operands.iterate([&](const model::OperandIndex &index, const model::Operand &object) {
+ _operands.iterate([&](const ir::OperandIndex &index, const ir::Operand &object) {
operands_lower_info[index] =
nnfw::cpp14::make_unique<operand::LowerInfo>(graph::operand::asShape4D(object.shape()));
});
// Make subgraphs while checking whether a node can be merged into a subgraph.
makeSubgraphs(operands_lower_info);
- _subgraphs->iterate([&](const model::SubgraphIndex &, model::Subgraph &subg) {
+ _subgraphs->iterate([&](const ir::SubgraphIndex &, model::Subgraph &subg) {
assert(subg.operations().size() > 0);
std::reverse(std::begin(subg.operations()), std::end(subg.operations()));
});
void Graph::initializeUseDef()
{
- operations().iterate(
- [&](const model::OperationIndex &index, const model::Operation &node) -> void {
- auto outputs = node.getOutputs();
- for (auto output : outputs)
- {
- operands().at(output).appendDef(index);
- }
+ operations().iterate([&](const ir::OperationIndex &index, const model::Operation &node) -> void {
+ auto outputs = node.getOutputs();
+ for (auto output : outputs)
+ {
+ operands().at(output).appendDef(index);
+ }
- auto inputs = node.getInputs();
- for (auto input : inputs)
- {
- operands().at(input).appendUse(index);
- }
- });
+ auto inputs = node.getInputs();
+ for (auto input : inputs)
+ {
+ operands().at(input).appendUse(index);
+ }
+ });
}
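initializeUseDef above builds classic use-def chains in one pass over the operations: each operation is recorded as a def on every output operand and as a use on every input operand. A minimal sketch with integer ids (assumed layout):
#include <cstdint>
#include <vector>
struct OperandEntry
{
  std::vector<uint32_t> uses; // operations reading this operand
  std::vector<uint32_t> defs; // operations writing this operand
};
void buildUseDef(const std::vector<std::vector<uint32_t>> &op_inputs,
                 const std::vector<std::vector<uint32_t>> &op_outputs,
                 std::vector<OperandEntry> &operands)
{
  for (uint32_t op = 0; op < op_inputs.size(); ++op)
  {
    for (auto out : op_outputs[op])
      operands[out].defs.push_back(op); // appendDef: op produces this operand
    for (auto in : op_inputs[op])
      operands[in].uses.push_back(op); // appendUse: op consumes this operand
  }
}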
-const operation::LowerInfo *Graph::getLowerInfo(const model::SubgraphIndex &subg_index) const
+const operation::LowerInfo *Graph::getLowerInfo(const ir::SubgraphIndex &subg_index) const
{
if (!_lower_info_map)
return nullptr;
return itr->second.get();
}
-void Graph::setLowerInfo(const model::SubgraphIndex &subg_index,
+void Graph::setLowerInfo(const ir::SubgraphIndex &subg_index,
std::unique_ptr<operation::LowerInfo> &&lower_info)
{
assert(_lower_info_map);
_lower_info_map->operation.insert(std::make_pair(subg_index, std::move(lower_info)));
}
-void Graph::removeLowerInfo(const model::SubgraphIndex &subg_index)
+void Graph::removeLowerInfo(const ir::SubgraphIndex &subg_index)
{
auto &subg_lower_info = _lower_info_map->operation;
assert(subg_lower_info.find(subg_index) != subg_lower_info.end());
}
}
-const operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index) const
+const operand::LowerInfo *Graph::getLowerInfo(const ir::OperandIndex &index) const
{
if (!_lower_info_map)
return nullptr;
return itr->second.get();
}
-operand::LowerInfo *Graph::getLowerInfo(const model::OperandIndex &index)
+operand::LowerInfo *Graph::getLowerInfo(const ir::OperandIndex &index)
{
if (!_lower_info_map)
return nullptr;
return itr->second.get();
}
-void Graph::setLowerInfo(const model::OperandIndex &index,
+void Graph::setLowerInfo(const ir::OperandIndex &index,
std::unique_ptr<operand::LowerInfo> &&lower_info)
{
assert(_lower_info_map);
_lower_info_map->operand.insert(std::make_pair(index, std::move(lower_info)));
}
-void Graph::removeLowerInfo(const model::OperandIndex &index)
+void Graph::removeLowerInfo(const ir::OperandIndex &index)
{
_lower_info_map->operand.erase(index);
}
void Graph::makeSubgraphs(
- model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info)
+ ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info)
{
// if SUBG_MAX_NODE == 0, no limit on nodes of a subgraph
const int subg_max_node = util::getConfigInt(util::config::SUBG_MAX_NODE);
bool is_profiling = util::getConfigBool(util::config::PROFILING_MODE);
model::Subgraph *subg = nullptr;
- model::SubgraphIndex subg_index;
+ ir::SubgraphIndex subg_index;
// NOTE: The method below appends nodes, creating a new subgraph when needed. If there is a better
// way, happy to update this code.
- Graph::PostDfsConstIterator().iterate(*this, [&](const model::OperationIndex &node_index,
+ Graph::PostDfsConstIterator().iterate(*this, [&](const ir::OperationIndex &node_index,
const model::Operation &node) {
// LowerInfo for in/output operands
auto backend = _backend_resolver->getBackend(node_index);
lower_info->addDefPermuteFactor(operand::PermuteFactor{backend, backend_layout});
}
- if (node.opcode() == model::OpCode::Split)
+ if (node.opcode() == ir::OpCode::Split)
{
// Ideally this condition would be 'node.getOutputs().size() > 1', but
// that is also true for HashtableLookup. TODO: Come up with a more clever solution
}
void Graph::manipulateLowerInfo(
- model::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info)
+ ir::OperandIndexMap<std::unique_ptr<operand::LowerInfo>> &operands_lower_info)
{
const auto default_backend = backend::BackendManager::get().getDefault();
for (auto index : _inputs)
}
// Set LowerInfo for each operand from the operand::LowerInfo holder
- _operands.iterate([&](const model::OperandIndex &index, model::Operand &) {
+ _operands.iterate([&](const ir::OperandIndex &index, ir::Operand &) {
setLowerInfo(index, std::move(operands_lower_info[index]));
});
}
std::map<uint32_t, std::string> dumps;
- _operands.iterate([&](const model::OperandIndex &index, model::Operand &object) {
+ _operands.iterate([&](const ir::OperandIndex &index, ir::Operand &object) {
std::stringstream sstream;
if (!getLowerInfo(index)->def_factors().empty() || !getLowerInfo(index)->use_factors().empty())
{
return "{ " + str + "}";
};
- auto operation_index_to_string = [](const model::OperationIndexList &operations) {
+ auto operation_index_to_string = [](const ir::OperationIndexList &operations) {
std::string str;
for (auto op : operations.list())
{
}
}
-bool Graph::mergeable(const model::SubgraphIndex &subg_index,
- const model::OperationIndex &node_index, ir::Layout layout)
+bool Graph::mergeable(const ir::SubgraphIndex &subg_index, const ir::OperationIndex &node_index,
+ ir::Layout layout)
{
// Are they mergeable?
// 1. the same backend id and layout?
return false;
}
-model::SubgraphIndex Graph::appendFreshSingleOpSubgraph(const model::OperationIndex &node_index,
- const model::Operation &node,
- ir::Layout layout)
+ir::SubgraphIndex Graph::appendFreshSingleOpSubgraph(const ir::OperationIndex &node_index,
+ const model::Operation &node,
+ ir::Layout layout)
{
// Create a fresh subgraph with one operation, and append it to subgraphs
// Create a fresh subgraph
void Graph::DefaultIterator<is_const>::iterate(GraphRef graph, const IterFn &fn) const
{
graph.operations().iterate(
- [&](const model::OperationIndex &index, NodeRef node) -> void { fn(index, node); });
+ [&](const ir::OperationIndex &index, NodeRef node) -> void { fn(index, node); });
}
//
{
assert(!graph.isBuildingPhase()); // Restrict iteration condition
- model::OperationIndexMap<bool> visited;
+ ir::OperationIndexMap<bool> visited;
graph.operations().iterate(
- [&](const model::OperationIndex &index, NodeRef) { visited[index] = false; });
+ [&](const ir::OperationIndex &index, NodeRef) { visited[index] = false; });
- std::function<void(const model::OperationIndex &, NodeRef)> dfs_recursive =
- [&](const model::OperationIndex &index, NodeRef node) -> void {
+ std::function<void(const ir::OperationIndex &, NodeRef)> dfs_recursive =
+ [&](const ir::OperationIndex &index, NodeRef node) -> void {
if (visited[index])
return;
visited[index] = true;
graph.operations().iterate(dfs_recursive);
// All of the operations(nodes) must have been visited.
- assert(
- std::all_of(visited.begin(), visited.end(),
- [](const std::pair<const model::OperationIndex, bool> &v) { return v.second; }));
+ assert(std::all_of(visited.begin(), visited.end(),
+ [](const std::pair<const ir::OperationIndex, bool> &v) { return v.second; }));
}
void Graph::setBackendResolver(std::unique_ptr<compiler::BackendResolver> &&br)
* limitations under the License.
*/
-#include "model/OpCode.h"
+#include "ir/OpCode.h"
#include <unordered_map>
namespace neurun
{
-namespace model
+namespace ir
{
const char *toString(OpCode opcode)
{
static const std::unordered_map<OpCode, const char *> map{{OpCode::Invalid, "Invalid"},
#define OP(Name) {OpCode::Name, #Name},
-#include "model/Operations.lst"
+#include "ir/Operations.lst"
#undef OP
{OpCode::Subgraph, "Subgraph"},
{OpCode::COUNT, "COUNT"}};
return map.at(opcode);
}
-} // namespace model
+} // namespace ir
} // namespace neurun
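Operations.lst is an X-macro list: a single master list of OP(Name) entries that each includer expands differently by redefining OP, exactly as toString does above. A self-contained sketch of the technique, where SKETCH_OP_LIST stands in for the real ir/Operations.lst:
#include <unordered_map>
#define SKETCH_OP_LIST(OP) OP(Add) OP(Conv2D) OP(Softmax)
enum class OpCode
{
#define OP(Name) Name,
  SKETCH_OP_LIST(OP)
#undef OP
  COUNT
};
inline const char *toString(OpCode opcode)
{
  static const std::unordered_map<OpCode, const char *> map{
#define OP(Name) {OpCode::Name, #Name},
      SKETCH_OP_LIST(OP)
#undef OP
      {OpCode::COUNT, "COUNT"}};
  return map.at(opcode);
}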
* limitations under the License.
*/
-#include "model/Operand.h"
+#include "ir/Operand.h"
namespace neurun
{
-namespace model
+namespace ir
{
size_t Operand::operandSize(void) const
return element_size * elements;
}
-void Operand::appendUse(const ::neurun::model::OperationIndex &idx) { _uses.append(idx); }
+void Operand::appendUse(const OperationIndex &idx) { _uses.append(idx); }
-void Operand::removeUse(const ::neurun::model::OperationIndex &idx) { _uses.remove(idx); }
+void Operand::removeUse(const OperationIndex &idx) { _uses.remove(idx); }
-void Operand::appendDef(const ::neurun::model::OperationIndex &idx)
+void Operand::appendDef(const OperationIndex &idx)
{
assert(!isConstant());
assert(_def.size() == 0);
_def.append(idx);
}
-void Operand::removeDef(const ::neurun::model::OperationIndex &idx)
+void Operand::removeDef(const OperationIndex &idx)
{
assert(_def.contains(idx));
graph::operand::ParentInfo *Operand::parent_info() { return _parent_info.get(); }
-} // namespace model
+} // namespace ir
} // namespace neurun
* limitations under the License.
*/
-#include "model/OperandIndexSequence.h"
+#include "ir/OperandIndexSequence.h"
#include <algorithm>
namespace neurun
{
-namespace model
+namespace ir
{
OperandIndexSequence::OperandIndexSequence(std::initializer_list<OperandIndex> list) : _set(list)
std::replace(_set.begin(), _set.end(), from, to);
}
-} // namespace model
+} // namespace ir
} // namespace neurun
* limitations under the License.
*/
-#include "model/OperationIndexList.h"
+#include "ir/OperationIndexList.h"
#include <algorithm>
namespace neurun
{
-namespace model
+namespace ir
{
OperationIndexList::OperationIndexList(std::initializer_list<OperationIndex> list) : _list(list)
// DO NOTHING
}
-bool OperationIndexList::contains(const ::neurun::model::OperationIndex &index) const
+bool OperationIndexList::contains(const OperationIndex &index) const
{
return std::find(_list.begin(), _list.end(), index) != _list.end();
}
-} // namespace model
+} // namespace ir
} // namespace neurun
namespace pass
{
-void ConstantInsertionPass::callback(const model::OperationIndex &node_index,
- model::Operation &node)
+void ConstantInsertionPass::callback(const ir::OperationIndex &node_index, model::Operation &node)
{
const auto &subgraph_index = _graph.subgraphs().getOperation(node_index);
const auto subg_lower_info = _graph.getLowerInfo(subgraph_index);
{
auto new_object = object;
// TODO Remove const_cast
- const_cast<std::list<model::OperationIndex> &>(new_object.getDef().list()).clear();
- const_cast<std::list<model::OperationIndex> &>(new_object.getUses().list()).clear();
+ const_cast<std::list<ir::OperationIndex> &>(new_object.getDef().list()).clear();
+ const_cast<std::list<ir::OperationIndex> &>(new_object.getUses().list()).clear();
const auto new_index = _graph.operands().emplace(new_object);
_replace_operands_map[key] = new_index;
#define __NEURUN_GRAPH_PASS_CONSTANT_INSERTION_PASS_H__
#include <ir/operand/PermuteFactor.h>
-#include <model/Index.h>
+#include <ir/Index.h>
#include "OperationPass.h"
#include <unordered_map>
#include <utility>
std::string id() final { return "ConstantInsertionPass"; }
public:
- void callback(const model::OperationIndex &index, model::Operation &node) final;
+ void callback(const ir::OperationIndex &index, model::Operation &node) final;
private:
struct ReplaceKey
{
- model::OperandIndex index;
+ ir::OperandIndex index;
graph::operand::PermuteFactor factor;
bool operator==(const ReplaceKey &other) const
std::size_t operator()(const ReplaceKey &key) const noexcept
{
using std::hash;
- return hash<model::OperandIndex>()(key.index) ^
+ return hash<ir::OperandIndex>()(key.index) ^
(hash<graph::operand::PermuteFactor>()(key.factor) << 1);
}
};
- std::unordered_map<ReplaceKey, model::OperandIndex, KeyHasher> _replace_operands_map;
+ std::unordered_map<ReplaceKey, ir::OperandIndex, KeyHasher> _replace_operands_map;
};
} // namespace pass
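KeyHasher above combines the two field hashes with the common xor-plus-shift trick: shifting one hash before xor-ing reduces accidental cancellation between the fields. A self-contained sketch of the same combine on a hypothetical two-field key (std::string stands in for graph::operand::PermuteFactor):
#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>
struct Key
{
  uint32_t index;
  std::string factor;
  bool operator==(const Key &other) const
  {
    return index == other.index && factor == other.factor;
  }
};
struct KeyHasher
{
  std::size_t operator()(const Key &key) const noexcept
  {
    // Hash each field, then shift one hash before combining.
    return std::hash<uint32_t>()(key.index) ^ (std::hash<std::string>()(key.factor) << 1);
  }
};
using ReplaceMap = std::unordered_map<Key, uint32_t, KeyHasher>;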
void OperandPass::run()
{
_graph.operands().iterate(
- [&](const model::OperandIndex &index, model::Operand &object) { callback(index, object); });
+ [&](const ir::OperandIndex &index, ir::Operand &object) { callback(index, object); });
}
} // namespace pass
#define __NEURUN_GRAPH_PASS_OPERAND_PASS_H__
#include "Pass.h"
-#include "model/Index.h"
+#include "ir/Index.h"
namespace neurun
{
-namespace model
+namespace ir
{
class Operand;
-} // namespace graph
+} // namespace ir
} // namespace neurun
namespace neurun
public:
std::string id() override = 0;
void run() override final;
- virtual void callback(const model::OperandIndex &i, model::Operand &o) = 0;
+ virtual void callback(const ir::OperandIndex &i, ir::Operand &o) = 0;
};
} // namespace pass
#include "OperationPass.h"
-#include "model/Index.h"
+#include "ir/Index.h"
#include "model/Operation.h"
#include "ir/Graph.h"
void OperationPass::run()
{
_graph.operations().iterate(
- [&](const model::OperationIndex &index, model::Operation &node) { callback(index, node); });
+ [&](const ir::OperationIndex &index, model::Operation &node) { callback(index, node); });
}
} // namespace pass
#define __NEURUN_GRAPH_PASS_OPERATION_PASS_H__
#include "Pass.h"
-#include "model/Index.h"
+#include "ir/Index.h"
namespace neurun
{
* @param index is the index of a node in graph
* @param node is the node in graph
*/
- virtual void callback(const model::OperationIndex &index, model::Operation &node) = 0;
+ virtual void callback(const ir::OperationIndex &index, model::Operation &node) = 0;
/**
* @brief Run the pass
#include "PermutationEliminationPass.h"
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "ir/operand/LowerInfo.h"
#include "ir/Graph.h"
#include "backend/IConfig.h"
{
namespace pass
{
-void PermutationEliminationPass::callback(const model::OperandIndex &inp_index,
- model::Operand &object)
+void PermutationEliminationPass::callback(const ir::OperandIndex &inp_index, ir::Operand &object)
{
if (_graph.getInputs().contains(inp_index))
{
}
}
-void PermutationEliminationPass::eliminateInput(const model::OperandIndex &inp_index,
- model::Operand &object)
+void PermutationEliminationPass::eliminateInput(const ir::OperandIndex &inp_index,
+ ir::Operand &object)
{
auto &model_inputs = _graph.getInputs();
}
}
-void PermutationEliminationPass::eliminateOutput(const model::OperandIndex &out_index,
- model::Operand &object)
+void PermutationEliminationPass::eliminateOutput(const ir::OperandIndex &out_index,
+ ir::Operand &object)
{
auto &model_outputs = _graph.getOutputs();
}
bool PermutationEliminationPass::isPermuteLayerToEliminate(
- const model::OperandIndexSequence &inp_indexes, const model::OperandIndexSequence &out_indexes,
+ const ir::OperandIndexSequence &inp_indexes, const ir::OperandIndexSequence &out_indexes,
bool is_for_model_input)
{
auto input_def_factors = _graph.getLowerInfo(inp_indexes.at(0))->def_factors();
#define __NEURUN_GRAPH_PASS_PERMUTATION_ELIMINATION_PASS_H__
#include "OperandPass.h"
-#include "model/Operand.h"
-#include "model/OperandIndexSequence.h"
+#include "ir/Operand.h"
+#include "ir/OperandIndexSequence.h"
namespace neurun
{
public:
std::string id() override { return "PermutationEliminationPass"; }
- void callback(const model::OperandIndex &index, model::Operand &object) override;
+ void callback(const ir::OperandIndex &index, ir::Operand &object) override;
private:
/**
*
* @return
*/
- void eliminateInput(const model::OperandIndex &inp_index, model::Operand &object);
+ void eliminateInput(const ir::OperandIndex &inp_index, ir::Operand &object);
/**
* @brief Remove Permute operation that permutes the output of a model
*
* @return
*/
- void eliminateOutput(const model::OperandIndex &out_index, model::Operand &object);
+ void eliminateOutput(const ir::OperandIndex &out_index, ir::Operand &object);
/**
* @brief Determine if passed operands are permute layer's input and output, that must be
*
* @return if it is permutation layer
*/
- bool isPermuteLayerToEliminate(const model::OperandIndexSequence &inp_indexes,
- const model::OperandIndexSequence &out_indexes,
+ bool isPermuteLayerToEliminate(const ir::OperandIndexSequence &inp_indexes,
+ const ir::OperandIndexSequence &out_indexes,
bool is_for_model_input);
};
#include <utility>
#include <unordered_map>
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "ir/operation/LowerInfo.h"
#include "ir/Graph.h"
#include "backend/IConfig.h"
namespace pass
{
-void PermutationInsertionPass::callback(const model::OperandIndex &index, model::Operand &object)
+void PermutationInsertionPass::callback(const ir::OperandIndex &index, ir::Operand &object)
{
auto &&operand_li = _graph.getLowerInfo(index);
assert(operand_li);
return;
}
- std::list<model::OperationIndex> permute_indexes;
+ std::list<ir::OperationIndex> permute_indexes;
// Build a map for all necessary types of operands
- std::unordered_map<operand::PermuteFactor, model::OperandIndex> factor_to_index;
+ std::unordered_map<operand::PermuteFactor, ir::OperandIndex> factor_to_index;
{
assert(operand_li->def_factors().size() == 1);
for (auto factor : operand_li->def_factors())
// Update operations' input that uses this operand
{
- std::list<model::OperationIndex> remove_list;
+ std::list<ir::OperationIndex> remove_list;
auto uses = object.getUses();
for (auto use : uses.list())
}
}
-model::OperationIndex
-PermutationInsertionPass::insertPermute(const model::OperandIndex &operand_index,
- const operand::PermuteFactor &factor)
+ir::OperationIndex PermutationInsertionPass::insertPermute(const ir::OperandIndex &operand_index,
+ const operand::PermuteFactor &factor)
{
assert(!_graph.isBuildingPhase());
#define __NEURUN_GRAPH_PASS_PERMUTATION_INSERTION_PASS_H__
#include "OperandPass.h"
-#include "model/Operand.h" //for model::OperationIndex
+#include "ir/Operand.h" //for OperationIndex
#include "backend/BackendManager.h"
#include "ir/operand/PermuteFactor.h"
public:
std::string id() override { return "PermutationInsertionPass"; }
- void callback(const model::OperandIndex &index, model::Operand &object) override;
+ void callback(const ir::OperandIndex &index, ir::Operand &object) override;
/**
* @brief Insert Permute operation that has given operand as input
* @param operand_index is the target operand index for the insertion
* @param factor is the output operand's backend type and layout
*
- * @return model::OperationIndex
+ * @return OperationIndex
*/
- model::OperationIndex insertPermute(const model::OperandIndex &operand_index,
- const operand::PermuteFactor &factor);
+ ir::OperationIndex insertPermute(const ir::OperandIndex &operand_index,
+ const operand::PermuteFactor &factor);
private:
};
namespace pass
{
-void PermutationOperationPass::callback(const model::OperationIndex &, model::Operation &node)
+void PermutationOperationPass::callback(const ir::OperationIndex &, model::Operation &node)
{
node.accept(*this);
};
below_subg.setInputs(it->node->getInputs());
below_subg.setOutputs(it->node->getOutputs());
- std::vector<model::OperationIndex> remove_list;
+ std::vector<ir::OperationIndex> remove_list;
remove_list.emplace_back(it->index);
while (++it != above_subg.end())
{
std::string id() final { return "PermutationOperationPass"; }
public:
- void callback(const model::OperationIndex &i, model::Operation &n) final;
+ void callback(const ir::OperationIndex &i, model::Operation &n) final;
public:
void visit(const model::operation::FullyConnected &) final;
#include "Verifier.h"
#include "ir/Graph.h"
-#include "model/OperationIndexMap.h"
+#include "ir/OperationIndexMap.h"
#include "util/logging.h"
auto &operations = graph.operations();
bool cyclic = false;
- model::OperationIndexMap<bool> visited;
- operations.iterate([&](const model::OperationIndex &index, const model::Operation &) {
- visited[index] = false;
- });
- model::OperationIndexMap<bool> on_stack = visited; // Copy from visited
+ ir::OperationIndexMap<bool> visited;
+ operations.iterate(
+ [&](const ir::OperationIndex &index, const model::Operation &) { visited[index] = false; });
+ ir::OperationIndexMap<bool> on_stack = visited; // Copy from visited
- std::function<void(const model::OperationIndex &index, const model::Operation &)> dfs_recursive =
- [&](const model::OperationIndex &index, const model::Operation &node) -> void {
+ std::function<void(const ir::OperationIndex &index, const model::Operation &)> dfs_recursive =
+ [&](const ir::OperationIndex &index, const model::Operation &node) -> void {
if (on_stack[index])
cyclic = true;
if (visited[index])
{
auto &operations = graph.operations();
uint32_t mismatches = 0;
- operations.iterate([&](const model::OperationIndex &index, const model::Operation &node) {
+ operations.iterate([&](const ir::OperationIndex &index, const model::Operation &node) {
for (auto operand_index : node.getInputs())
{
auto &operand = graph.operands().at(operand_index);
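As context for the verifier hunk above, a self-contained sketch of the visited/on-stack DFS cycle check it performs, over a plain adjacency list instead of the graph's operation container:

    #include <cstddef>
    #include <functional>
    #include <vector>

    // Returns true if the directed graph given as an adjacency list is cyclic.
    bool hasCycle(const std::vector<std::vector<int>> &adj)
    {
      std::vector<bool> visited(adj.size(), false);
      std::vector<bool> on_stack(adj.size(), false);
      bool cyclic = false;

      std::function<void(int)> dfs_recursive = [&](int node) {
        if (on_stack[node])
          cyclic = true; // back edge: node is already on the current DFS path
        if (visited[node])
          return;
        visited[node] = true;
        on_stack[node] = true;
        for (int next : adj[node])
          dfs_recursive(next);
        on_stack[node] = false; // node leaves the DFS stack
      };

      for (std::size_t i = 0; i < adj.size(); ++i)
        dfs_recursive(static_cast<int>(i));
      return cyclic;
    }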
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "model/OperandConstraint.h"
-
-namespace neurun
-{
-namespace model
-{
-namespace operation
-{
-
-} // namespace operation
-} // namespace model
-} // namespace neurun
}
}
-bool Subgraph::exist(const neurun::model::OperationIndex &index) const
+bool Subgraph::exist(const OperationIndex &index) const
{
for (const auto &element : _operations)
{
ir::DataType tensorTypeToDataType(TensorType type);
// Create operands from tflite::Tensor
- model::OperandIndex loadOperand(const Tensor *tensor);
- void loadOperationIO(const Operator *op, model::OperandIndexSequence &inputs,
- model::OperandIndexSequence &outputs);
+ ir::OperandIndex loadOperand(const Tensor *tensor);
+ void loadOperationIO(const Operator *op, ir::OperandIndexSequence &inputs,
+ ir::OperandIndexSequence &outputs);
// Create operations from Operator
void loadOperation(const Operator *op);
// Load Strides and Paddings from options to param
graph::Graph &_graph;
const Model *_model;
// Maps Tensor indices to neurun Operands.
- std::vector<model::OperandIndex> _tensor_to_operand;
+ std::vector<ir::OperandIndex> _tensor_to_operand;
};
template <typename LoaderDomain, typename SpecificLoader>
}
template <typename LoaderDomain, typename SpecificLoader>
-model::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Tensor *tensor)
+ir::OperandIndex BaseLoader<LoaderDomain, SpecificLoader>::loadOperand(const Tensor *tensor)
{
ir::Shape shape;
// Shape
const auto *data = _model->buffers()->Get(tensor->buffer())->data();
if (data != nullptr)
{
- auto ptr = nnfw::cpp14::make_unique<model::CachedData>(data->data(), data->size());
+ auto ptr = nnfw::cpp14::make_unique<ir::CachedData>(data->data(), data->size());
_graph.setOperandValue(operand_index, std::move(ptr));
}
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadOperationIO(const Operator *op,
- model::OperandIndexSequence &inputs,
- model::OperandIndexSequence &outputs)
+ ir::OperandIndexSequence &inputs,
+ ir::OperandIndexSequence &outputs)
{
for (const std::int32_t idx : *op->inputs())
{
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadConv2D(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
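Every loadXxx hunk below repeats this same prologue: declare the two index sequences, fill them via loadOperationIO, then build the node. A hypothetical standalone sketch of that pattern (IndexSequence and Operator are stand-ins, not the real flatbuffer types):

    #include <cstdint>
    #include <vector>

    using IndexSequence = std::vector<std::uint32_t>; // stand-in for ir::OperandIndexSequence

    struct Operator // stand-in; the real loader reads op->inputs()/op->outputs()
    {
      std::vector<std::int32_t> inputs;
      std::vector<std::int32_t> outputs;
    };

    void loadOperationIO(const Operator *op, IndexSequence &inputs, IndexSequence &outputs)
    {
      for (const std::int32_t idx : op->inputs)
        inputs.push_back(static_cast<std::uint32_t>(idx));
      for (const std::int32_t idx : op->outputs)
        outputs.push_back(static_cast<std::uint32_t>(idx));
    }

    void loadConv2D(const Operator *op)
    {
      IndexSequence inputs;
      IndexSequence outputs;
      loadOperationIO(op, inputs, outputs);
      // ... read the Conv2D params and emplace the operation into the graph ...
    }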
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadDepthwiseConv2D(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadTransposeConv(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadAvgPool2D(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadReshape(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadSoftmax(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadMaxPool2D(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadConcatenation(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadInstanceNorm(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadFC(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadAdd(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadSub(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadMul(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadDiv(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
void BaseLoader<LoaderDomain, SpecificLoader>::loadPack(const Operator *op)
{
// This runtime_error will be removed if one of the backends supports this operation
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadRelu6(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadResizeBilinear(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
auto input = inputs.at(0);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadRsqrt(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadSqrt(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadSquaredDifference(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadTanh(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadTranspose(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
auto input = inputs.at(0);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadMean(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
auto input = inputs.at(0);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceMax(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
auto input = inputs.at(0);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadPad(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadLogistic(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadExp(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadGather(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
model::operation::Gather::Param param;
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadSpaceToBatchND(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadBatchToSpaceND(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
auto input = inputs.at(0);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadReduceSum(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
auto input = inputs.at(0);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadCustom(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
auto *op_code = _model->operator_codes()->Get(op->opcode_index());
auto custom_op_id = op_code->custom_code()->str();
- auto constraint = model::operation::OperandConstraint::createExact(inputs.size());
+ auto constraint = ir::OperandConstraint::createExact(inputs.size());
assert(op->custom_options_format() == CustomOptionsFormat::CustomOptionsFormat_FLEXBUFFERS &&
"Unsupported custom operation options format");
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadSqueeze(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
template <typename LoaderDomain, typename SpecificLoader>
void BaseLoader<LoaderDomain, SpecificLoader>::loadPrelu(const Operator *op)
{
- model::OperandIndexSequence inputs;
- model::OperandIndexSequence outputs;
+ ir::OperandIndexSequence inputs;
+ ir::OperandIndexSequence outputs;
loadOperationIO(op, inputs, outputs);
VERBOSE(NNAPI::Model) << "setOperandValue: Invalid index value (negative)" << std::endl;
return ANEURALNETWORKS_BAD_DATA;
}
- // NOTE ::neurun::model::OperandIndex uses uint32_t as its underlying type as various NNAPI
+ // NOTE OperandIndex uses uint32_t as its underlying type, since various NNAPI
// functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
// index
// ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
<< std::endl;
return ANEURALNETWORKS_BAD_DATA;
}
- // NOTE ::neurun::model::OperandIndex uses uint32_t as its underlying type as various NNAPI
+ // NOTE OperandIndex uses uint32_t as its underlying type, since various NNAPI
// functions such as ANeuralNetworksModel_addOperation use uint32_t to represent operand
// index
// ANeuralNetworksModel_setOperandValue, however, uses int32_t to represent operand index.
#include "NNAPIConvert.h"
#include "util/logging.h"
-const neurun::model::OperandIndex
+const neurun::ir::OperandIndex
ANeuralNetworksExecution::getInputOperandIndex(int32_t index) noexcept
{
if (index < 0)
{
// Negative index: return invalid index
- return neurun::model::OperandIndex{};
+ return neurun::ir::OperandIndex{};
}
uint32_t cast_index = static_cast<uint32_t>(index);
if (cast_index >= _execution->graph().getInputs().size())
{
// Return invalid index
- return neurun::model::OperandIndex{};
+ return neurun::ir::OperandIndex{};
}
- neurun::model::IOIndex input_index{cast_index};
+ neurun::ir::IOIndex input_index{cast_index};
const auto operand_index = _execution->graph().getInputs().at(input_index);
return operand_index;
}
-const neurun::model::OperandIndex
+const neurun::ir::OperandIndex
ANeuralNetworksExecution::getOutputOperandIndex(int32_t index) noexcept
{
if (index < 0)
{
// Negative index: return invalid index
- return neurun::model::OperandIndex{};
+ return neurun::ir::OperandIndex{};
}
uint32_t cast_index = static_cast<uint32_t>(index);
if (cast_index >= _execution->graph().getOutputs().size())
{
// Return invalid index
- return neurun::model::OperandIndex{};
+ return neurun::ir::OperandIndex{};
}
- neurun::model::IOIndex output_index{cast_index};
+ neurun::ir::IOIndex output_index{cast_index};
const auto operand_index = _execution->graph().getOutputs().at(output_index);
return operand_index;
}
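Both getters above fall back to a default-constructed OperandIndex as the "invalid" sentinel when the int32_t NNAPI index is negative or out of range; a minimal sketch of that convention, with a hypothetical IndexLike stand-in:

    #include <cstdint>

    struct IndexLike // hypothetical; default-constructed means "invalid"
    {
      std::uint32_t value = UINT32_MAX;
      bool valid() const { return value != UINT32_MAX; }
    };

    IndexLike toIndex(std::int32_t nnapi_index, std::uint32_t bound)
    {
      if (nnapi_index < 0)
        return IndexLike{}; // negative index: invalid
      const auto cast_index = static_cast<std::uint32_t>(nnapi_index);
      if (cast_index >= bound)
        return IndexLike{}; // out of range: invalid
      return IndexLike{cast_index};
    }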
bool ANeuralNetworksExecution::compareDataType(const ANeuralNetworksOperandType *type,
- const neurun::model::OperandIndex index) noexcept
+ const neurun::ir::OperandIndex index) noexcept
{
try
{
}
bool ANeuralNetworksExecution::compareShape(const ANeuralNetworksOperandType *type,
- const neurun::model::OperandIndex index) noexcept
+ const neurun::ir::OperandIndex index) noexcept
{
// Passed shape should be specified
if (haveUnspecifiedDims(index))
return operand_shape == shape_from_type;
}
-bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::model::OperandIndex index) noexcept
+bool ANeuralNetworksExecution::haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept
{
const auto operand_shape = _execution->graph().operands().at(index).shape();
return operand_shape.num_elements() == 0;
}
-size_t ANeuralNetworksExecution::getOperandSize(const neurun::model::OperandIndex index) noexcept
+size_t ANeuralNetworksExecution::getOperandSize(const neurun::ir::OperandIndex index) noexcept
{
try
{
{
try
{
- neurun::model::IOIndex input_index{index};
+ neurun::ir::IOIndex input_index{index};
const auto operand_index = getInputOperandIndex(index);
const auto type_info = _execution->graph().operands().at(operand_index).typeInfo();
{
try
{
- neurun::model::IOIndex output_index{index};
+ neurun::ir::IOIndex output_index{index};
const auto operand_index = getOutputOperandIndex(index);
const auto type_info = _execution->graph().operands().at(operand_index).typeInfo();
{
try
{
- neurun::model::IOIndex output_index{index};
+ neurun::ir::IOIndex output_index{index};
const auto operand_index = getOutputOperandIndex(index);
bool unspecified = haveUnspecifiedDims(operand_index);
{
try
{
- neurun::model::IOIndex output_index{index};
+ neurun::ir::IOIndex output_index{index};
const auto operand_index = getOutputOperandIndex(index);
bool unspecified = haveUnspecifiedDims(operand_index);
if (unspecified)
bool startExecute(void) noexcept;
bool execute(void) noexcept;
- const neurun::model::OperandIndex getInputOperandIndex(int32_t index) noexcept;
- const neurun::model::OperandIndex getOutputOperandIndex(int32_t index) noexcept;
+ const neurun::ir::OperandIndex getInputOperandIndex(int32_t index) noexcept;
+ const neurun::ir::OperandIndex getOutputOperandIndex(int32_t index) noexcept;
bool compareDataType(const ANeuralNetworksOperandType *type,
- const neurun::model::OperandIndex index) noexcept;
+ const neurun::ir::OperandIndex index) noexcept;
bool compareShape(const ANeuralNetworksOperandType *type,
- const neurun::model::OperandIndex index) noexcept;
- bool haveUnspecifiedDims(const neurun::model::OperandIndex index) noexcept;
- size_t getOperandSize(const neurun::model::OperandIndex index) noexcept;
+ const neurun::ir::OperandIndex index) noexcept;
+ bool haveUnspecifiedDims(const neurun::ir::OperandIndex index) noexcept;
+ size_t getOperandSize(const neurun::ir::OperandIndex index) noexcept;
const std::shared_ptr<neurun::exec::Execution> instance(void) noexcept;
/**
bool ANeuralNetworksModel::setOperandValue(uint32_t index, const void *buffer, size_t length,
bool optional, bool copy) noexcept
{
- const neurun::model::OperandIndex ind{index};
+ const neurun::ir::OperandIndex ind{index};
try
{
setOptionalOperand(ind);
}
- using ::neurun::model::CachedData;
- using ::neurun::model::ExternalData;
+ using neurun::ir::CachedData;
+ using neurun::ir::ExternalData;
if (copy)
{
_graph->operands().at(ind).data(
{
_operand_usages[index] = OperandUsage::MODEL_INPUT;
- const neurun::model::OperandIndex ind{index};
+ const neurun::ir::OperandIndex ind{index};
_graph->addInput(ind);
}
catch (const std::exception &e)
{
try
{
- const neurun::model::OperandIndex ind{index};
+ const neurun::ir::OperandIndex ind{index};
// Duplicated output is not allowed
if (_graph->getOutputs().contains(ind))
bool ANeuralNetworksModel::isExistOperand(uint32_t index) noexcept
{
- return _graph->operands().exist(neurun::model::OperandIndex{index});
+ return _graph->operands().exist(neurun::ir::OperandIndex{index});
}
size_t ANeuralNetworksModel::operandSize(uint32_t index) noexcept
{
try
{
- return _graph->operands().at(neurun::model::OperandIndex{index}).operandSize();
+ return _graph->operands().at(neurun::ir::OperandIndex{index}).operandSize();
}
catch (const std::exception &e)
{
return (_operand_usages[index] == OperandUsage::OPERATION_OUTPUT);
}
-void ANeuralNetworksModel::setOptionalOperand(const neurun::model::OperandIndex idx)
+void ANeuralNetworksModel::setOptionalOperand(const neurun::ir::OperandIndex idx)
{
_optional_operands.insert(idx);
}
void ANeuralNetworksModel::fillOptionalOperand(void)
{
_graph->operations().iterate(
- [&](const ::neurun::model::OperationIndex &, ::neurun::model::Operation &node) {
+ [&](const neurun::ir::OperationIndex &, ::neurun::model::Operation &node) {
for (auto input : node.getInputs())
{
// TODO fill default value for optional operands
void release(std::shared_ptr<neurun::graph::Graph> &graph) { graph = _graph; }
private:
- void setOptionalOperand(const neurun::model::OperandIndex idx);
+ void setOptionalOperand(const neurun::ir::OperandIndex idx);
void fillOptionalOperand(void);
private:
std::shared_ptr<neurun::graph::Graph> _graph;
- std::unordered_set<neurun::model::OperandIndex> _optional_operands;
+ std::unordered_set<neurun::ir::OperandIndex> _optional_operands;
std::vector<OperandUsage> _operand_usages;
};
using namespace neurun::model;
_map[ANEURALNETWORKS_BATCH_TO_SPACE_ND] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
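The registrations that follow all share one shape: an unordered_map from the NNAPI operation type to a generator lambda taking the parameter pack and the operand set. A tiny standalone sketch of that dispatch, with hypothetical stand-in types and opcode value:

    #include <functional>
    #include <unordered_map>

    struct Param { int input_count; int output_count; }; // stand-in
    struct Operands {};                                  // stand-in
    struct Operation { virtual ~Operation() = default; };
    struct Reshape : Operation {};

    using Generator = std::function<Operation *(const Param &, Operands &)>;

    int main()
    {
      std::unordered_map<int, Generator> map;
      const int HYPOTHETICAL_RESHAPE_OP = 3; // not the real NNAPI value

      map[HYPOTHETICAL_RESHAPE_OP] = [](const Param &p, Operands &) -> Operation * {
        // Each generator validates its arity before building the node.
        return (p.input_count == 2 && p.output_count == 1) ? new Reshape{} : nullptr;
      };

      Param param{2, 1};
      Operands operands;
      delete map.at(HYPOTHETICAL_RESHAPE_OP)(param, operands);
    }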
_map[ANEURALNETWORKS_CONCATENATION] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count >= 2); // At least one input tensor and axis
assert(init_param.output_count == 1);
return new operation::Concat{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_RESHAPE] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
// Each input should be interpreted as follows:
return new operation::Softmax{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_CAST_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_REDUCE_SUM_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2);
assert(init_param.output_count == 1);
};
_map[ANEURALNETWORKS_STRIDED_SLICE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 7 && init_param.output_count == 1);
OperandIndexSequence inputs{init_param.inputs[0], init_param.inputs[1], init_param.inputs[2],
};
_map[ANEURALNETWORKS_TRANSPOSE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
// TODO make this work with init_param.input_count == 1 (when permutation vector is optional)
// Inputs
};
_map[ANEURALNETWORKS_SQUEEZE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 1 || init_param.input_count == 2);
assert(init_param.output_count == 1);
return new operation::Squeeze{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_TANH] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::Tanh{inputs, outputs};
};
- _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_LOGISTIC] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::Div{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_EXP_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_EXP_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_GREATER_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_LESS_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_REDUCE_MAX_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_NOT_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_LOGICAL_AND_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::LogicalAnd{inputs, outputs};
};
- _map[ANEURALNETWORKS_RSQRT_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_RSQRT_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::RSQRT{inputs, outputs};
};
- _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_RELU] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_RESIZE_BILINEAR] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::ResizeBilinear{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_RELU1] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::ReLU1{inputs, outputs};
};
- _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_RELU6] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::ReLU6{inputs, outputs};
};
- _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ _map[ANEURALNETWORKS_RNN] = [](const OperationFactory::Param &init_param, Operands &operands) {
assert(init_param.input_count == 6 && init_param.output_count == 2);
// Each input should be interpreted as follows:
return new operation::RNN{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_FLOOR] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_SPACE_TO_BATCH_ND] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_SPACE_TO_DEPTH] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_EMBEDDING_LOOKUP] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_L2_NORMALIZATION] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_HASHTABLE_LOOKUP] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &) {
assert(init_param.input_count == 3 && init_param.output_count == 2);
// Each output should be interpreted as follows:
return new operation::HashtableLookup{inputs, outputs};
};
- _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_PRELU_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::TransposeConv{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_SQRT_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_SQRT_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_LOGICAL_OR_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_LOGICAL_NOT_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::LogicalNot{inputs, outputs};
};
- _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ _map[ANEURALNETWORKS_LSTM] = [](const OperationFactory::Param &init_param, Operands &operands) {
assert(init_param.input_count == 23 && init_param.output_count == 4);
// Each input should be interpreted as follows:
};
_map[ANEURALNETWORKS_EQUAL_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_SQUARED_DIFFERENCE_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ Operands &) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_TOPK_V2_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 2);
// Each output should be interpreted as follows:
};
_map[ANEURALNETWORKS_GATHER_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::Gather{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_NEG_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_NEG_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::Neg{inputs, outputs};
};
- _map[ANEURALNETWORKS_ABS_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_ABS_EX] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_ARGMAX_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::ArgMax{inputs, outputs, param};
};
- _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &) {
+ _map[ANEURALNETWORKS_DEQUANTIZE] = [](const OperationFactory::Param &init_param, Operands &) {
assert(init_param.input_count == 1 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
return new operation::Dequantize{inputs, outputs};
};
- _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ _map[ANEURALNETWORKS_MEAN] = [](const OperationFactory::Param &init_param, Operands &operands) {
assert(init_param.input_count == 3 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_LOCAL_RESPONSE_NORMALIZATION] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 5 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_DEPTH_TO_SPACE] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_PACK_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count >= 3 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_REDUCE_MIN_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 2 && init_param.output_count == 1);
OperandIndexSequence outputs{init_param.outputs[0]};
};
_map[ANEURALNETWORKS_SPLIT_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 3);
assert(init_param.output_count >= 1); // At least one output tensor and axis
};
_map[ANEURALNETWORKS_UNPACK_EX] = [](const OperationFactory::Param &init_param,
- neurun::model::Operands &operands) {
+ Operands &operands) {
assert(init_param.input_count == 3 && init_param.output_count >= 1);
OperandIndexSequence inputs{init_param.inputs[0]};
neurun::model::Operation *OperationFactory::create(ANeuralNetworksOperationType type,
const OperationFactory::Param ¶m,
- neurun::model::Operands &operands)
+ Operands &operands)
{
auto it = _map.find(type);
if (it == _map.end())
#include <unordered_map>
-#include "model/Operands.h"
+#include "ir/Operands.h"
#include "model/Operation.h"
#include "NeuralNetworks.h"
#include "NeuralNetworksEx.h"
public:
using Generator = std::function<neurun::model::Operation *(const OperationFactory::Param &,
- neurun::model::Operands &)>;
+ neurun::ir::Operands &)>;
public:
static OperationFactory &get();
public:
neurun::model::Operation *create(ANeuralNetworksOperationType,
const OperationFactory::Param ¶m,
- neurun::model::Operands &operands);
+ neurun::ir::Operands &operands);
// TODO add "register" method for separating registration, possibly supporting custom-ops
private:
return std::make_shared<MockConfig>();
}
std::unique_ptr<BackendContext>
- newContext(const model::Operands &,
- const std::shared_ptr<custom::IKernelBuilder> &kb) const override
+ newContext(const ir::Operands &, const std::shared_ptr<custom::IKernelBuilder> &kb) const override
{
return nullptr;
}
using OIS = OperandIndexSequence;
template <typename NodeT, typename... Types>
-model::OperationIndex create(std::shared_ptr<graph::Graph> graph, Types &&... args)
+OperationIndex create(std::shared_ptr<graph::Graph> graph, Types &&... args)
{
typename NodeT::Param op_params{Activation::NONE};
auto op = nnfw::cpp14::make_unique<NodeT>(std::forward<Types>(args)..., op_params);
{
using namespace neurun::model;
-using DataType = DataType;
class CompiledMockUpModel
{
{
using namespace neurun::model;
-using DataType = DataType;
using ExecManager = neurun::exec::interp::ExecManager;
using Execution = neurun::exec::Execution;
{
::neurun::graph::Graph graph;
- ::neurun::model::OperandIndex index0{0u};
- ::neurun::model::OperandIndex index1{1u};
+ neurun::ir::OperandIndex index0{0u};
+ neurun::ir::OperandIndex index1{1u};
graph.addInput({index0});
graph.addInput({index1});
- ::neurun::model::OperandIndex index10{10u};
- ::neurun::model::OperandIndex index11{11u};
- ::neurun::model::OperandIndex index12{12u};
+ neurun::ir::OperandIndex index10{10u};
+ neurun::ir::OperandIndex index11{11u};
+ neurun::ir::OperandIndex index12{12u};
graph.addOutput({index10});
graph.addOutput({index11});
ASSERT_EQ(graph.getInputs().size(), 2);
ASSERT_EQ(graph.getOutputs().size(), 3);
- ::neurun::model::IOIndex io_index0{0};
- ::neurun::model::IOIndex io_index1{1};
- ::neurun::model::IOIndex io_index2{2};
+ neurun::ir::IOIndex io_index0{0};
+ neurun::ir::IOIndex io_index1{1};
+ neurun::ir::IOIndex io_index2{2};
ASSERT_EQ(graph.getInputs().at(io_index0), 0);
ASSERT_EQ(graph.getInputs().at(io_index1), 1);
#define __NEURUN_TEST_GRAPH_MOCK_NODE_H__
#include "model/Operation.h"
-#include "model/OperandIndexSequence.h"
+#include "ir/OperandIndexSequence.h"
namespace neurun_test
{
class SimpleMock : public neurun::model::Operation
{
public:
- SimpleMock(const neurun::model::OperandIndexSequence &inputs,
- const neurun::model::OperandIndexSequence &outputs)
- : neurun::model::Operation{neurun::model::operation::OperandConstraint::createAny()}
+ SimpleMock(const neurun::ir::OperandIndexSequence &inputs,
+ const neurun::ir::OperandIndexSequence &outputs)
+ : neurun::model::Operation{neurun::ir::OperandConstraint::createAny()}
{
setInputs(inputs);
setOutputs(outputs);
public:
void accept(neurun::model::OperationVisitor &) const override {}
- neurun::model::OpCode opcode() const final { return neurun::model::OpCode::Invalid; }
+ neurun::ir::OpCode opcode() const final { return neurun::ir::OpCode::Invalid; }
};
} // namespace graph
#include <gtest/gtest.h>
-#include "model/OperandIndexSequence.h"
+#include "ir/OperandIndexSequence.h"
-using neurun::model::OperandIndex;
-using neurun::model::OperandIndexSequence;
+using neurun::ir::OperandIndex;
+using neurun::ir::OperandIndexSequence;
TEST(graph_OperandIndexSequence, append)
{
ASSERT_EQ(iset.size(), 5);
- neurun::model::IOIndex index1{1};
- neurun::model::IOIndex index2{4};
+ neurun::ir::IOIndex index1{1};
+ neurun::ir::IOIndex index2{4};
ASSERT_EQ(iset.at(index1), 2);
ASSERT_EQ(iset.at(index2), 10);
#include <gtest/gtest.h>
-#include "model/Operands.h"
+#include "ir/Operands.h"
TEST(graph_operand_Set, set_test)
{
- neurun::model::Operands set;
+ neurun::ir::Operands set;
neurun::ir::Shape shape0{1, 2, 3};
set.emplace(shape0, type);
set.emplace(shape1, type);
- ASSERT_EQ(set.exist(neurun::model::OperandIndex{0u}), true);
- ASSERT_EQ(set.exist(neurun::model::OperandIndex{1u}), true);
- ASSERT_EQ(set.exist(neurun::model::OperandIndex{2u}), false);
+ ASSERT_EQ(set.exist(neurun::ir::OperandIndex{0u}), true);
+ ASSERT_EQ(set.exist(neurun::ir::OperandIndex{1u}), true);
+ ASSERT_EQ(set.exist(neurun::ir::OperandIndex{2u}), false);
- ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(0), 1);
- ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(1), 2);
- ASSERT_EQ(set.at(neurun::model::OperandIndex{0u}).shape().dim(2), 3);
+ ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(0), 1);
+ ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(1), 2);
+ ASSERT_EQ(set.at(neurun::ir::OperandIndex{0u}).shape().dim(2), 3);
}
namespace
{
-using IndexSet = neurun::model::OperandIndexSequence;
+using IndexSet = neurun::ir::OperandIndexSequence;
using Mock = neurun_test::graph::SimpleMock;
} // namespace anonymous
using neurun::model::Operations;
using neurun::model::Operation;
-using neurun::model::OperationIndex;
+using neurun::ir::OperationIndex;
TEST(graph_operation_Set, operation_test)
{
#include <gtest/gtest.h>
#include "ir/Graph.h"
-#include "model/Index.h"
-#include "model/OperandIndexSequence.h"
+#include "ir/Index.h"
+#include "ir/OperandIndexSequence.h"
#include "model/operation/Conv2D.h"
#include "model/operation/Concat.h"
#include <stdexcept>
-using Index = neurun::model::IOIndex;
-using IndexSet = neurun::model::OperandIndexSequence;
+using Index = neurun::ir::IOIndex;
+using IndexSet = neurun::ir::OperandIndexSequence;
TEST(graph_operation_setIO, operation_setIO_conv)
{
#include "ir/Graph.h"
#include "ir/verifier/Verifier.h"
#include "cpp14/memory.h"
-#include "model/Operand.h"
+#include "ir/Operand.h"
#include "../MockNode.h"
-using IndexSet = neurun::model::OperandIndexSequence;
+using IndexSet = neurun::ir::OperandIndexSequence;
using Mock = neurun_test::graph::SimpleMock;
TEST(Verifier, dag_checker)