Remove tensor conversion nodes and the common tensor builder, since a new tensor-conversion mechanism will be introduced.
Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
virtual Stage generate(const ::internal::tflite::op::FullyConnected::Node &node) = 0;
virtual Stage generate(const ::internal::tflite::op::Reshape::Node &node) = 0;
virtual Stage generate(const ::internal::tflite::op::Softmax::Node &node) = 0;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) = 0;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) = 0;
- virtual Stage generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) = 0;
virtual Stage generate(const ::internal::tflite::op::NOP::Node &node) = 0;
};
{
virtual ~ITensorBuilder(void) = default;
virtual void mark(const ::neurun::graph::operand::Index &ind) = 0;
- virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) = 0;
- virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) = 0;
- virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) = 0;
// TODO Add an interface for adding subsumption info
virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) = 0;
virtual void allocate(void) = 0;
//
// StageGenerator
//
-StageGenerator::StageGenerator(
- const neurun::graph::operand::Set &ctx, const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
- : _ctx(ctx), _tensor_builder(tensor_builder), _common_tensor_builder(common_tensor_builder)
+StageGenerator::StageGenerator(const neurun::graph::operand::Set &ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+ : _ctx(ctx), _tensor_builder(tensor_builder)
{
// DO NOTHING
}
};
}
-Stage StageGenerator::generate(
- const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node)
-{
- throw std::runtime_error("Wrong Approach");
-}
-
-Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node)
-{
- throw std::runtime_error("Wrong Approach");
-}
-
-Stage StageGenerator::generate(
- const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node)
-{
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
-
- struct Param
- {
- int ifm_index;
-
- ::neurun::internal::operand::Shape shape{1};
- };
-
- Param param;
-
- param.ifm_index = ifm_index.asInt();
-
- param.shape = _ctx.at(ifm_index).shape();
-
- auto tensors = _tensor_builder;
-
- _common_tensor_builder->mark(ifm_index);
-
- auto common_tensor_builder = _common_tensor_builder;
-
- return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) {
- const ::neurun::graph::operand::Index ifm_index{param.ifm_index};
-
- auto input_alloc = tensors->at(ifm_index).get();
- auto common_tensor = common_tensor_builder->at(ifm_index);
-
- std::unique_ptr<::neurun::kernel::acl_cl::TensorConvertFromCommonLayer> fn{
- new ::neurun::kernel::acl_cl::TensorConvertFromCommonLayer};
-
- fn->configure(common_tensor.get(), input_alloc, param.shape);
-
- builder.append(std::move(fn));
- };
-}
-
-Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node)
-{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
-
- struct Param
- {
- int ofm_index;
-
- ::neurun::internal::operand::Shape shape{1};
- };
-
- Param param;
-
- param.ofm_index = ofm_index.asInt();
-
- param.shape = _ctx.at(ofm_index).shape();
-
- auto tensors = _tensor_builder;
-
- _common_tensor_builder->mark(ofm_index);
-
- auto common_tensor_builder = _common_tensor_builder;
-
- return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) {
- const ::neurun::graph::operand::Index ofm_index{param.ofm_index};
-
- auto output_alloc = tensors->at(ofm_index).get();
- auto common_tensor = common_tensor_builder->at(ofm_index);
-
- std::unique_ptr<::neurun::kernel::acl_cl::TensorConvertToCommonLayer> fn{
- new ::neurun::kernel::acl_cl::TensorConvertToCommonLayer};
-
- fn->configure(output_alloc, common_tensor.get(), param.shape);
-
- builder.append(std::move(fn));
- };
-}
-
Stage StageGenerator::generate(const ::internal::tflite::op::NOP::Node &node)
{
// DO NOTHING
#include "graph/operand/Set.h"
#include "backend/acl_cl/TensorBuilder.h"
-#include "internal/common/TensorBuilder.h"
namespace neurun
{
{
public:
StageGenerator(const neurun::graph::operand::Set &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
virtual Stage generate(const ::internal::tflite::op::FullyConnected::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::Reshape::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::Softmax::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::NOP::Node &node) override;
private:
const neurun::graph::operand::Set &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
- std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
};
} // namespace acl_cl
_inds.insert(ind.asInt());
}
-void TensorBuilder::markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind)
-{
- _from_common_candidates.emplace_back(op, ind);
-}
-
-void TensorBuilder::markToCommon(const ::internal::tflite::op::Node &op, int32_t ind)
-{
- _to_common_candidates.emplace_back(op, ind);
-}
-
-void TensorBuilder::insertTensorConvertNodes(::internal::tflite::op::Sequence &operations)
-{
- for (auto param : _from_common_candidates)
- {
- auto index = operations.find(param.op);
- if (index != -1)
- {
- operations.insert<::internal::tflite::op::TensorConvert::AclFromCommon::Node>(
- index, ::internal::tflite::op::TensorConvert::AclFromCommon::Param(param.tensor_index));
- }
- }
-
- for (auto param : _to_common_candidates)
- {
- auto index = operations.find(param.op);
- if (index != -1)
- {
- operations.insert<::internal::tflite::op::TensorConvert::AclToCommon::Node>(
- index + 1, ::internal::tflite::op::TensorConvert::AclToCommon::Param(param.tensor_index));
- }
- }
-}
-
void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
{
assert(_tensors.size() == 0);
TensorBuilder(codegen::Plan &plan);
virtual void mark(const ::neurun::graph::operand::Index &ind) override;
- virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) override;
- virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) override;
- virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) override;
virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
virtual void allocate(void) override;
namespace cpu
{
-StageGenerator::StageGenerator(
- const neurun::graph::operand::Set &operand_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
- : _ctx(operand_ctx), _tensor_builder(tensor_builder),
- _common_tensor_builder(common_tensor_builder)
+StageGenerator::StageGenerator(const neurun::graph::operand::Set &operand_ctx,
+ const std::shared_ptr<TensorBuilder> &tensor_builder)
+ : _ctx(operand_ctx), _tensor_builder(tensor_builder)
{
// DO NOTHING
}
};
}
-Stage StageGenerator::generate(
- const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node)
-{
- const ::neurun::graph::operand::Index ifm_index{node.param().ifm_index};
-
- struct Param
- {
- int ifm_index;
-
- ::neurun::internal::operand::Shape shape{1};
- };
-
- Param param;
-
- param.ifm_index = ifm_index.asInt();
-
- param.shape = _ctx.at(ifm_index).shape();
-
- auto tensors = _tensor_builder;
-
- _common_tensor_builder->mark(ifm_index);
-
- auto common_tensor_builder = _common_tensor_builder;
-
- return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) {
- const ::neurun::graph::operand::Index ifm_index{param.ifm_index};
-
- auto input_alloc = tensors->at(ifm_index).get();
- auto common_tensor = common_tensor_builder->at(ifm_index);
-
- std::unique_ptr<::neurun::kernel::cpu::TensorConvertFromCommonLayer> fn{
- new ::neurun::kernel::cpu::TensorConvertFromCommonLayer};
-
- fn->configure(common_tensor.get(), input_alloc, param.shape);
-
- builder.append(std::move(fn));
- };
-}
-
-Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node)
-{
- const ::neurun::graph::operand::Index ofm_index{node.param().ofm_index};
-
- struct Param
- {
- int ofm_index;
-
- ::neurun::internal::operand::Shape shape{1};
- };
-
- Param param;
-
- param.ofm_index = ofm_index.asInt();
-
- param.shape = _ctx.at(ofm_index).shape();
-
- auto tensors = _tensor_builder;
-
- _common_tensor_builder->mark(ofm_index);
-
- auto common_tensor_builder = _common_tensor_builder;
-
- return [tensors, common_tensor_builder, param](IExecutionBuilder &builder) {
- const ::neurun::graph::operand::Index ofm_index{param.ofm_index};
-
- auto output_alloc = tensors->at(ofm_index).get();
- auto common_tensor = common_tensor_builder->at(ofm_index);
-
- std::unique_ptr<::neurun::kernel::cpu::TensorConvertToCommonLayer> fn{
- new ::neurun::kernel::cpu::TensorConvertToCommonLayer};
-
- fn->configure(output_alloc, common_tensor.get(), param.shape);
-
- builder.append(std::move(fn));
- };
-}
-
-Stage StageGenerator::generate(
- const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node)
-{
- throw std::runtime_error("Wrong Approach");
-}
-
-Stage StageGenerator::generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node)
-{
- throw std::runtime_error("Wrong Approach");
-}
-
Stage StageGenerator::generate(const ::internal::tflite::op::NOP::Node &node)
{
// DO NOTHING
#include "internal/cpu.h"
#include "TensorBuilder.h"
-#include "internal/common/TensorBuilder.h"
-
namespace neurun
{
namespace backend
{
public:
StageGenerator(const neurun::graph::operand::Set &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder,
- const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
+ const std::shared_ptr<TensorBuilder> &tensor_builder);
virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
virtual Stage generate(const ::internal::tflite::op::FullyConnected::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::Reshape::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::Softmax::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) override;
- virtual Stage
- generate(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::NOP::Node &node) override;
private:
const neurun::graph::operand::Set &_ctx;
std::shared_ptr<TensorBuilder> _tensor_builder;
- std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
};
} // namespace cpu
_inds.insert(ind.asInt());
}
-void TensorBuilder::markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind)
-{
- _from_common_candidates.emplace_back(op, ind);
-}
-
-void TensorBuilder::markToCommon(const ::internal::tflite::op::Node &op, int32_t ind)
-{
- _to_common_candidates.emplace_back(op, ind);
-}
-
-void TensorBuilder::insertTensorConvertNodes(::internal::tflite::op::Sequence &operations)
-{
- for (auto param : _from_common_candidates)
- {
- auto index = operations.find(param.op);
- if (index != -1)
- {
- operations.insert<::internal::tflite::op::TensorConvert::CpuFromCommon::Node>(
- index, ::internal::tflite::op::TensorConvert::CpuFromCommon::Param(param.tensor_index));
- }
- }
-
- for (auto param : _to_common_candidates)
- {
- auto index = operations.find(param.op);
- if (index != -1)
- {
- operations.insert<::internal::tflite::op::TensorConvert::CpuToCommon::Node>(
- index + 1, ::internal::tflite::op::TensorConvert::CpuToCommon::Param(param.tensor_index));
- }
- }
-}
-
void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
{
assert(_tensors.size() == 0);
TensorBuilder(codegen::Plan &plan);
virtual void mark(const ::neurun::graph::operand::Index &ind) override;
- virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) override;
- virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) override;
- virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) override;
virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
virtual void allocate(void) override;
return ret;
}
-std::shared_ptr<::internal::common::TensorBuilder> BackendResolver::getCommonTensorBuilder()
-{
- return _backend_manager.getCommonTensorBuilder();
-}
-
} // namespace neurun
} // namespace codegen
#define __NEURUN_CODEGEN_BACKEND_RESOLVER_H__
#include <set>
+#include <unordered_map>
#include <typeindex>
#include "logging.h"
#include "internal/BackendManager.h"
#include "backend/IInitializerGenerator.h"
#include "backend/IStageGenerator.h"
-#include "internal/common/TensorBuilder.h"
namespace neurun
{
#include "internal/op/Op.lst"
#undef OP
}
-
- // TODO : It's just workaround. It's logic should be changed.
- _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuFromCommon::Node)] =
- backend_manager.get("cpu");
- _gen_map[typeid(::internal::tflite::op::TensorConvert::CpuToCommon::Node)] =
- backend_manager.get("cpu");
- _gen_map[typeid(::internal::tflite::op::TensorConvert::AclFromCommon::Node)] =
- backend_manager.get("acl_cl");
- _gen_map[typeid(::internal::tflite::op::TensorConvert::AclToCommon::Node)] =
- backend_manager.get("acl_cl");
}
std::shared_ptr<neurun::backend::IInitializerGenerator>
std::shared_ptr<neurun::backend::IStageGenerator> getStageGenerator(const std::type_index &type);
std::shared_ptr<neurun::backend::ITensorBuilder> getTensorBuilder(const std::type_index &type);
std::set<std::shared_ptr<neurun::backend::ITensorBuilder>> getAllTensorBuilders();
- std::shared_ptr<::internal::common::TensorBuilder> getCommonTensorBuilder();
private:
std::unordered_map<std::type_index, ::internal::Backend> _gen_map;
VERBOSE(LIR) << " - Output : OFM(" << node.param().output_index << ")" << std::endl;
}
-void Dumper::visit(const TensorConvert::CpuFromCommon::Node &node)
-{
- VERBOSE(LIR) << "CpuFromCommon" << std::endl;
- // NOTE No details for this node. Soon will be removed.
-}
-
-void Dumper::visit(const TensorConvert::CpuToCommon::Node &node)
-{
- VERBOSE(LIR) << "CpuToCommon" << std::endl;
- // NOTE No details for this node. Soon will be removed.
-}
-
-void Dumper::visit(const TensorConvert::AclFromCommon::Node &node)
-{
- VERBOSE(LIR) << "AclFromCommon" << std::endl;
- // NOTE No details for this node. Soon will be removed.
-}
-
-void Dumper::visit(const TensorConvert::AclToCommon::Node &node)
-{
- VERBOSE(LIR) << "AclToCommon" << std::endl;
- // NOTE No details for this node. Soon will be removed.
-}
-
void Dumper::visit(const NOP::Node &node)
{
VERBOSE(LIR) << "* NOP" << std::endl;
void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
void visit(const ::internal::tflite::op::Reshape::Node &node) override;
void visit(const ::internal::tflite::op::Softmax::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
void visit(const ::internal::tflite::op::NOP::Node &node) override;
};
void PlanBuilder::finalize(BackendResolver &backend_resolver)
{
auto tensor_builders = backend_resolver.getAllTensorBuilders();
- auto common_tensor_builder = backend_resolver.getCommonTensorBuilder();
// Prepare tensors
for (auto &tensor_builder : tensor_builders)
tensor_builder->prepare(_tensor_info_ctx);
}
- common_tensor_builder->prepare(_tensor_info_ctx);
-
// Process Stage
ExecutionBuilder execution_builder{_plan};
tensor_builder->allocate();
}
- common_tensor_builder->allocate();
-
// Fill weight/bias
for (auto it = _initializer_ctx.begin(); it != _initializer_ctx.end(); ++it)
{
_builder.addStage(stage_gen->generate(node));
}
-void Planner::visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
-void Planner::visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node)
-{
- auto stage_gen = _backend_resolver.getStageGenerator(typeid(node));
- _builder.addStage(stage_gen->generate(node));
-}
-
void Planner::visit(const ::internal::tflite::op::NOP::Node &node)
{
// DO NOTHING
void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
void visit(const ::internal::tflite::op::Reshape::Node &node) override;
void visit(const ::internal::tflite::op::Softmax::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
void visit(const ::internal::tflite::op::NOP::Node &node) override;
private:
mark(param.ifm_index);
mark(param.ker_index);
mark(param.bias_index);
-
- markToCommon(node, param.ofm_index);
- markFromCommon(node, param.ifm_index);
}
void TensorMarker::visit(const ::internal::tflite::op::MaxPool2D::implicit::Node &node)
const auto ¶m = node.param();
mark(param.ofm_index);
mark(param.ifm_index);
-
- markToCommon(node, param.ofm_index);
- markFromCommon(node, param.ifm_index);
}
void TensorMarker::visit(const ::internal::tflite::op::AvgPool2D::implicit::Node &node)
const auto ¶m = node.param();
mark(param.ofm_index);
mark(param.ifm_index);
-
- markToCommon(node, param.ofm_index);
- markFromCommon(node, param.ifm_index);
}
void TensorMarker::visit(const ::internal::tflite::op::Concat::Node &node)
{
const auto ¶m = node.param();
mark(param.ofm_index);
- markToCommon(node, param.ofm_index);
for (auto ind : param.ifm_indexes)
{
mark(ind);
- markFromCommon(node, ind);
}
}
mark(param.input_index);
mark(param.weight_index);
mark(param.bias_index);
-
- markToCommon(node, param.output_index);
- markFromCommon(node, param.input_index);
}
void TensorMarker::visit(const ::internal::tflite::op::Reshape::Node &node)
const auto ¶m = node.param();
mark(param.output_index);
mark(param.input_index);
-
- markToCommon(node, param.output_index);
- markFromCommon(node, param.input_index);
}
void TensorMarker::visit(const ::internal::tflite::op::Softmax::Node &node)
const auto ¶m = node.param();
mark(param.output_index);
mark(param.input_index);
-
- markToCommon(node, param.output_index);
- markFromCommon(node, param.input_index);
-}
-
-void TensorMarker::visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node)
-{
- // DO NOTHING
-}
-
-void TensorMarker::visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node)
-{
- // DO NOTHING
-}
-
-void TensorMarker::visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node)
-{
- // DO NOTHING
-}
-
-void TensorMarker::visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node)
-{
- // DO NOTHING
}
void TensorMarker::visit(const ::internal::tflite::op::NOP::Node &node)
void visit(const ::internal::tflite::op::FullyConnected::Node &node) override;
void visit(const ::internal::tflite::op::Reshape::Node &node) override;
void visit(const ::internal::tflite::op::Softmax::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::CpuToCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclFromCommon::Node &node) override;
- void visit(const ::internal::tflite::op::TensorConvert::AclToCommon::Node &node) override;
void visit(const ::internal::tflite::op::NOP::Node &node) override;
private:
void mark(int32_t ind) { _tensor_builder.mark(::neurun::graph::operand::Index{ind}); }
- void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind)
- {
- _tensor_builder.markFromCommon(op, ind);
- }
- void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind)
- {
- _tensor_builder.markToCommon(op, ind);
- }
private:
neurun::backend::ITensorBuilder &_tensor_builder;
#include "internal/Padding.h"
#include "backend/IInitializerGenerator.h"
#include "backend/IStageGenerator.h"
-#include "internal/common/Tensor.h"
-#include "internal/common/TensorBuilder.h"
#include "compilation.h"
#include "model.h"
linear->markTensors(backend_resolver);
-#if 0 // Tensor Conversion disabled
- auto tensor_builders = backend_resolver.getAllTensorBuilders();
-
- for (auto tensor_builder : tensor_builders)
- {
- tensor_builder->insertTensorConvertNodes(operations);
- }
-#endif
-
linear->accept(neurun::codegen::Planner{operands, plan_builder, backend_resolver});
// TODO Add optimization passes
{
const auto &operands = _plan.model().operands();
- _common_tensor_builder = std::make_shared<::internal::common::TensorBuilder>(_plan);
-
// Add arm_compute backend
{
using namespace ::neurun::backend::acl_cl;
auto acl_tensor_builder = std::make_shared<TensorBuilder>(_plan);
auto acl_initializer_gen = std::make_shared<InitializerGenerator>(operands);
- auto acl_stage_gen =
- std::make_shared<StageGenerator>(operands, acl_tensor_builder, _common_tensor_builder);
+ auto acl_stage_gen = std::make_shared<StageGenerator>(operands, acl_tensor_builder);
// TODO Do not use magic string for backend id
_gen_map["acl_cl"] = {acl_initializer_gen, acl_stage_gen};
using namespace ::neurun::backend::cpu;
auto cpu_tensor_builder = std::make_shared<TensorBuilder>(_plan);
auto cpu_initializer_gen = std::make_shared<InitializerGenerator>(operands);
- auto cpu_stage_gen =
- std::make_shared<StageGenerator>(operands, cpu_tensor_builder, _common_tensor_builder);
+ auto cpu_stage_gen = std::make_shared<StageGenerator>(operands, cpu_tensor_builder);
// TODO Do not use magic string for backend id
_gen_map["cpu"] = {cpu_initializer_gen, cpu_stage_gen};
Backend BackendManager::get(const std::string &key) { return _gen_map.at(key); }
-std::shared_ptr<::internal::common::TensorBuilder> BackendManager::getCommonTensorBuilder()
-{
- return _common_tensor_builder;
-}
-
} // namespace internal
#include "backend/IInitializerGenerator.h"
#include "backend/IStageGenerator.h"
#include "backend/ITensorBuilder.h"
-#include "internal/common/TensorBuilder.h"
namespace internal
{
Backend get(const std::string &key);
- std::shared_ptr<::internal::common::TensorBuilder> getCommonTensorBuilder();
-
private:
neurun::codegen::Plan &_plan;
std::map<std::string, Backend> _gen_map;
- std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
};
} // namespace internal
#include "internal/nnapi/feature/View.h"
#include "internal/nnapi/feature/Reader.h"
-#include "internal/common/Tensor.h"
-
struct Sink
{
virtual ~Sink() = default;
public:
void pull(::arm_compute::ITensor &tensor) const override
{
-// NOTE Leave common tensor conversion code before Graph IR supports tensor conversion
-#if 0
- // Only for common tensor now
- assert(typeid(tensor) == typeid(::internal::common::Tensor));
-#endif
-
float *base = reinterpret_cast<float *>(_base);
for (int32_t n = 0; n < _vlen; ++n)
into.at(bat, ch, row, col) = value;
};
}
-
-// NOTE Leave common tensor conversion code before Graph IR supports tensor conversion
-#if 0
- // Only for common tensor now
- assert(typeid(tensor) == typeid(::internal::common::Tensor));
-
- // nnapi tensor ordering == common tensor ordering
- const ::internal::nnapi::feature::Reader<float> from{_shape, tensor.buffer(), _size};
- ::internal::nnapi::feature::View<float> into{_shape, _base, _size};
-
- ::nnfw::util::feature::iterate(_shape)
- << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(batch, ch, row, col);
- into.at(batch, ch, row, col) = value;
- };
-#endif
}
private:
#include "backend/acl_cl/feature/View.h"
-#include "internal/common/Tensor.h"
-
struct Source
{
virtual ~Source() = default;
public:
void push(::arm_compute::ITensor &tensor) const override
{
-// NOTE Leave common tensor conversion code before Graph IR supports tensor conversion
-#if 0
- // Only for common tensor now
- assert(typeid(tensor) == typeid(::internal::common::Tensor));
-#endif
-
auto base = reinterpret_cast<const float *>(_base);
for (int32_t n = 0; n < _vlen; ++n)
into.at(bat, ch, row, col) = value;
};
}
-
-// NOTE Leave common tensor conversion code before Graph IR supports tensor conversion
-#if 0
- // Only for common tensor now
- assert(typeid(tensor) == typeid(::internal::common::Tensor));
-
- // nnapi tensor ordering == common tensor ordering
- const ::internal::nnapi::feature::Reader<float> from{_shape, _base, _size};
- ::internal::nnapi::feature::View<float> into{_shape, tensor.buffer(), _size};
-
- ::nnfw::util::feature::iterate(_shape)
- << [&](uint32_t batch, uint32_t ch, uint32_t row, uint32_t col) {
- const auto value = from.at(batch, ch, row, col);
- into.at(batch, ch, row, col) = value;
- };
-#endif
}
private:
+++ /dev/null
-#ifndef __INTERNAL_COMMON_TENSOR_H__
-#define __INTERNAL_COMMON_TENSOR_H__
-
-#include <arm_compute/core/ITensor.h>
-#include <arm_compute/core/TensorInfo.h>
-
-#include "backend/IObject.h"
-
-namespace internal
-{
-namespace common
-{
-
-class Tensor : public ::arm_compute::ITensor
-{
-public:
- Tensor() = default;
-
- Tensor(::arm_compute::TensorInfo info) : _info(info)
- {
- // DO_NOTING
- }
-
- Tensor(uint8_t *buffer) : _buffer(buffer)
- {
- // DO NOTHING
- }
-
-public:
- void setBuffer(uint8_t *buffer) { _buffer = buffer; }
-
-public:
- ::arm_compute::TensorInfo *info() const override
- {
- return const_cast<::arm_compute::TensorInfo *>(&_info);
- }
-
- ::arm_compute::TensorInfo *info() override { return &_info; }
-
- uint8_t *buffer() const override { return _buffer; }
-
- void allocate()
- {
- uint32_t size = _info.total_size(); // NOTE This size may not be accurate
- _buffer = new uint8_t[size]; // NOTE The allocated buffer is never deallocated.
- }
-
-private:
- ::arm_compute::TensorInfo _info;
- uint8_t *_buffer = nullptr;
-};
-
-} // common
-} // internal
-
-#endif // __INTERNAL_COMMON_TENSOR_H__
+++ /dev/null
-#include "TensorBuilder.h"
-
-#include <algorithm>
-
-namespace internal
-{
-namespace common
-{
-
-TensorBuilder::TensorBuilder(neurun::codegen::Plan &plan) : _plan(plan)
-{
- // DO NOTHING
-}
-
-void TensorBuilder::mark(const ::neurun::graph::operand::Index &ind)
-{
- assert(_tensors.size() == 0);
-
- int index = ind.asInt();
-
- auto it = _inds.find(index);
- if (it == _inds.end())
- {
- _inds.insert(index);
- }
-}
-
-void TensorBuilder::markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind)
-{
- // DO NOTHING
- throw std::runtime_error("Wrong Approach");
-}
-
-void TensorBuilder::markToCommon(const ::internal::tflite::op::Node &op, int32_t ind)
-{
- // DO NOTHING
- throw std::runtime_error("Wrong Approach");
-}
-
-void TensorBuilder::insertTensorConvertNodes(::internal::tflite::op::Sequence &operations)
-{
- // DO NOTHING
- throw std::runtime_error("Wrong Approach");
-}
-
-void TensorBuilder::prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx)
-{
- assert(_tensors.size() == 0);
-
- for (auto ind_int : _inds)
- {
- ::neurun::graph::operand::Index ind{ind_int};
- auto tensor = std::make_shared<::internal::common::Tensor>(tensor_info_ctx.at(ind.asInt()));
- _plan.common_operands().set(ind, std::make_shared<::internal::common::Object>(tensor));
- _tensors[ind.asInt()] = tensor;
- }
-}
-
-void TensorBuilder::allocate(void)
-{
- assert(_inds.size() == _tensors.size());
-
- for (auto it : _tensors)
- {
- it.second->allocate();
- }
-}
-
-std::shared_ptr<::internal::common::Tensor>
-TensorBuilder::at(const ::neurun::graph::operand::Index &ind)
-{
- return _tensors.at(ind.asInt());
-}
-
-} // namespace common
-} // namespace internal
+++ /dev/null
-#ifndef __INTERNAL_COMMON_TENSOR_BUILDER_H__
-#define __INTERNAL_COMMON_TENSOR_BUILDER_H__
-
-#include <unordered_set>
-#include <unordered_map>
-
-#include "backend/ITensorBuilder.h"
-#include "codegen/Plan.h"
-#include "internal/common/Tensor.h"
-#include "internal/common/common.h"
-
-namespace internal
-{
-namespace common
-{
-
-class Plan;
-
-class TensorBuilder : public neurun::backend::ITensorBuilder
-{
-public:
- TensorBuilder(neurun::codegen::Plan &plan);
-
- virtual void mark(const ::neurun::graph::operand::Index &ind) override;
- virtual void markFromCommon(const ::internal::tflite::op::Node &op, int32_t ind) override;
- virtual void markToCommon(const ::internal::tflite::op::Node &op, int32_t ind) override;
- virtual void insertTensorConvertNodes(::internal::tflite::op::Sequence &operations) override;
- virtual void prepare(const std::map<int, ::arm_compute::TensorInfo> &tensor_info_ctx) override;
- virtual void allocate(void) override;
-
- std::shared_ptr<::internal::common::Tensor> at(const ::neurun::graph::operand::Index &ind);
-
-private:
- neurun::codegen::Plan &_plan;
- std::unordered_set<int> _inds;
- std::unordered_map<int, std::shared_ptr<::internal::common::Tensor>> _tensors;
-};
-
-} // namespace common
-} // namespace internal
-
-#endif // __INTERNAL_COMMON_TENSOR_BUILDER_H__
+++ /dev/null
-#include "common.h"
-
-namespace internal
-{
-namespace common
-{
-
-void Object::access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const
-{
- fn(*_tensor);
-}
-
-} // common
-} // internal
+++ /dev/null
-#ifndef __INTERNAL_COMMON_H__
-#define __INTERNAL_COMMON_H__
-
-#include "backend/IObject.h"
-#include "Tensor.h"
-
-namespace internal
-{
-namespace common
-{
-
-class Object : public neurun::backend::operand::IObject
-{
-public:
- Object() = default;
-
-public:
- Object(const std::shared_ptr<Tensor> &tensor) : _tensor{tensor}
- {
- // DO NOTHING
- }
-
-public:
- Tensor *ptr(void) const override { return _tensor.get(); }
-
-private:
- std::shared_ptr<Tensor> _tensor;
-
-public:
- void access(const std::function<void(::arm_compute::ITensor &tensor)> &fn) const override;
-};
-
-} // common
-} // internal
-
-#endif // __INTERNAL_COMMON_H__
#include "internal/op/Reshape.h"
#include "internal/op/FullyConnected.h"
#include "internal/op/Softmax.h"
-#include "internal/op/TensorConvert_Cpu.h"
-#include "internal/op/TensorConvert_Acl.h"
#include "internal/op/NOP.h"
namespace internal
virtual void visit(const Reshape::Node &) = 0;
virtual void visit(const FullyConnected::Node &) = 0;
virtual void visit(const Softmax::Node &) = 0;
- virtual void visit(const TensorConvert::CpuFromCommon::Node &) = 0;
- virtual void visit(const TensorConvert::CpuToCommon::Node &) = 0;
- virtual void visit(const TensorConvert::AclFromCommon::Node &) = 0;
- virtual void visit(const TensorConvert::AclToCommon::Node &) = 0;
virtual void visit(const NOP::Node &) = 0;
};
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "internal/op/TensorConvert_Acl.h"
-#include "internal/op/NodeVisitor.h"
-
-#include <cassert>
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace AclFromCommon
-{
-
-void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
-
-} // namespace AclFromCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace AclFromCommon
-{
-
-Param::Param(uint32_t inputIndex) { ifm_index = inputIndex; }
-
-} // namespace AclFromCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace AclToCommon
-{
-
-void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
-
-} // namespace AclToCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace AclToCommon
-{
-
-Param::Param(uint32_t outputIndex) { ofm_index = outputIndex; }
-
-} // namespace AclToCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __INTERNAL_OP_TENSOR_CONVERT_ACL_H__
-#define __INTERNAL_OP_TENSOR_CONVERT_ACL_H__
-
-#include "internal/op/Node.h"
-
-#include <cstdint>
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace AclFromCommon
-{
-
-struct Param
-{
- int32_t ifm_index;
-
- Param() = default;
- Param(uint32_t inputIndex);
-};
-
-class Node final : public op::Node
-{
-public:
- Node(const Param ¶m) : _param(param)
- {
- // DO NOTHING
- }
-
-public:
- virtual ~Node() = default;
-
-public:
- const Param ¶m(void) const { return _param; }
-
-public:
- void accept(NodeVisitor &&) const override;
-
-private:
- const Param _param;
-};
-
-} // namespace AclFromCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace AclToCommon
-{
-
-struct Param
-{
- int32_t ofm_index;
-
- Param() = default;
- Param(uint32_t outputIndex);
-};
-
-class Node final : public op::Node
-{
-public:
- Node(const Param ¶m) : _param(param)
- {
- // DO NOTHING
- }
-
-public:
- virtual ~Node() = default;
-
-public:
- const Param ¶m(void) const { return _param; }
-
-public:
- void accept(NodeVisitor &&) const override;
-
-private:
- const Param _param;
-};
-
-} // namespace AclToCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-#endif // __INTERNAL_OP_TENSOR_CONVERT_ACL_H__
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "internal/op/TensorConvert_Cpu.h"
-#include "internal/op/NodeVisitor.h"
-
-#include <cassert>
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace CpuFromCommon
-{
-
-void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
-
-} // namespace CpuFromCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace CpuFromCommon
-{
-
-Param::Param(uint32_t inputIndex) { ifm_index = inputIndex; }
-
-} // namespace FromCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace CpuToCommon
-{
-
-void Node::accept(NodeVisitor &&v) const { v.visit(*this); }
-
-} // namespace CpuToCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace CpuToCommon
-{
-
-Param::Param(uint32_t outputIndex) { ofm_index = outputIndex; }
-
-} // namespace CpuToCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __INTERNAL_OP_TENSOR_CONVERT_CPU_H__
-#define __INTERNAL_OP_TENSOR_CONVERT_CPU_H__
-
-#include "internal/op/Node.h"
-
-#include <cstdint>
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace CpuFromCommon
-{
-
-struct Param
-{
- int32_t ifm_index;
-
- Param() = default;
- Param(uint32_t inputIndex);
-};
-
-class Node final : public op::Node
-{
-public:
- Node(const Param ¶m) : _param(param)
- {
- // DO NOTHING
- }
-
-public:
- virtual ~Node() = default;
-
-public:
- const Param ¶m(void) const { return _param; }
-
-public:
- void accept(NodeVisitor &&) const override;
-
-private:
- const Param _param;
-};
-
-} // namespace CpuFromCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-namespace internal
-{
-namespace tflite
-{
-namespace op
-{
-namespace TensorConvert
-{
-namespace CpuToCommon
-{
-
-struct Param
-{
- int32_t ofm_index;
-
- Param() = default;
- Param(uint32_t outputIndex);
-};
-
-class Node final : public op::Node
-{
-public:
- Node(const Param ¶m) : _param(param)
- {
- // DO NOTHING
- }
-
-public:
- virtual ~Node() = default;
-
-public:
- const Param ¶m(void) const { return _param; }
-
-public:
- void accept(NodeVisitor &&) const override;
-
-private:
- const Param _param;
-};
-
-} // namespace CpuToCommon
-} // namespace TensorConvert
-} // namespace op
-} // namespace tflite
-} // namespace internal
-
-#endif // __INTERNAL_OP_TENSOR_CONVERT_CPU_H__
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#include "TensorConvertFromCommonLayer.h"
#include "internal/nnapi/feature/Reader.h"
} // namespace acl_cl
} // namespace kernel
} // namespace neurun
+
+#endif
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#ifndef __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
#define __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
} // namespace neurun
#endif // __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+
+#endif
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#include "TensorConvertToCommonLayer.h"
#include "backend/acl_cl/feature/View.h"
} // namespace acl_cl
} // namespace kernel
} // namespace neurun
+
+#endif
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#ifndef __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_TO_COMMON_LAYER_H__
#define __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_TO_COMMON_LAYER_H__
} // namespace neurun
#endif // __INTERNAL_KERNELS_ACL_CL_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+
+#endif
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#include "TensorConvertFromCommonLayer.h"
#include "internal/nnapi/feature/Reader.h"
} // namespace cpu
} // namespace kernel
} // namespace neurun
+
+#endif
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#ifndef __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
#define __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
} // namespace neurun
#endif // __NEURUN_KERNEL_CPU_TENSOR_CONVERT_FROM_COMMON_LAYER_H__
+
+#endif
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#include "TensorConvertToCommonLayer.h"
#include "internal/nnapi/feature/Reader.h"
} // namespace cpu
} // namespace kernel
} // namespace neurun
+
+#endif
* limitations under the License.
*/
+//
+// THIS FILE IS UNUSED BUT LEFT FOR FUTURE REFERENCE
+//
+
+#if 0
+
#ifndef __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
#define __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
} // namespace neurun
#endif // __NEURUN_KERNEL_CPU_TENSOR_CONVERT_TO_COMMON_LAYER_H__
+
+#endif