#include <model/Operands.h>
#include "Config.h"
-#include "StageGenerator.h"
#include "KernelGenerator.h"
#include "ShapeFixer.h"
#include "MemoryManager.h"
std::shared_ptr<TensorBuilder> tensor_builder =
std::make_shared<TensorBuilder>(createMemoryManager()))
: ::neurun::backend::Backend{std::make_shared<Config>(),
- std::make_shared<StageGenerator>(operand_ctx, tensor_builder),
std::make_shared<KernelGenerator>(operand_ctx, tensor_builder),
std::make_shared<ShapeFixer>(operand_ctx, tensor_builder)}
{
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StageGenerator.h"
-
-#include <arm_compute/runtime/CL/CLFunctions.h> // Include all ARM Compute CL functions
-#include <arm_compute/runtime/CL/CLFunctionsEx.h> // Include all ARM Compute EX CL functions
-#include <arm_compute/runtime/misc/functions/GenericGather.h>
-#include <arm_compute/runtime/misc/functions/GenericReshapeLayer.h>
-
-#include "kernel/ConcatLayer.h"
-#include "model/Index.h"
-#include "model/DataType.h"
-#include "model/InternalType.h"
-#include "compiler/IExecutionBuilder.h"
-#include "exec/NopFunction.h"
-#include "util/logging.h"
-#include "util/Utils.h"
-#include "util/Padding.h"
-#include "acl_common/AclFunction.h"
-#include "acl_common/Convert.h"
-#include "acl_common/Swizzle.h"
-
-using ::neurun::compiler::IExecutionBuilder;
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-using ::neurun::backend::acl_common::asAclFunction;
-
-//
-// StageGenerator
-//
-StageGenerator::StageGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
- : _ctx(ctx), _tensor_builder(tensor_builder)
-{
- // DO NOTHING
-}
-
-void StageGenerator::visit(const model::operation::CastNode &) {}
-
-void StageGenerator::visit(const model::operation::Conv2DNode &) {}
-
-void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &) {}
-
-void StageGenerator::visit(const model::operation::MaxPool2DNode &) {}
-
-void StageGenerator::visit(const model::operation::AvgPool2DNode &) {}
-
-void StageGenerator::visit(const model::operation::ConcatNode &) {}
-
-void StageGenerator::visit(const model::operation::FullyConnectedNode &) {}
-
-void StageGenerator::visit(const model::operation::MulNode &) {}
-
-void StageGenerator::visit(const model::operation::ReduceSumNode &) {}
-
-void StageGenerator::visit(const model::operation::ReshapeNode &) {}
-
-void StageGenerator::visit(const model::operation::SqueezeNode &) {}
-
-void StageGenerator::visit(const model::operation::TanhNode &) {}
-
-void StageGenerator::visit(const model::operation::SoftmaxNode &) {}
-
-void StageGenerator::visit(const model::operation::StridedSliceNode &) {}
-
-void StageGenerator::visit(const model::operation::TransposeNode &) {}
-
-void StageGenerator::visit(const model::operation::AddNode &) {}
-
-void StageGenerator::visit(const model::operation::SubNode &) {}
-
-void StageGenerator::visit(const model::operation::DivNode &) {}
-
-void StageGenerator::visit(const model::operation::ExpNode &) {}
-
-void StageGenerator::visit(const model::operation::LogisticNode &) {}
-
-void StageGenerator::visit(const model::operation::LogicalAndNode &) {}
-
-void StageGenerator::visit(const model::operation::LSTMNode &) {}
-
-void StageGenerator::visit(const model::operation::ReduceMaxNode &) {}
-
-void StageGenerator::visit(const model::operation::ComparisonNode &) {}
-
-void StageGenerator::visit(const model::operation::RSQRTNode &) {}
-
-void StageGenerator::visit(const model::operation::ReLUNode &) {}
-
-void StageGenerator::visit(const model::operation::ResizeBilinearNode &) {}
-
-void StageGenerator::visit(const model::operation::ReLU1Node &) {}
-
-void StageGenerator::visit(const model::operation::ReLU6Node &) {}
-
-void StageGenerator::visit(const model::operation::RNNNode &) {}
-
-void StageGenerator::visit(const model::operation::FloorNode &) {}
-
-void StageGenerator::visit(const model::operation::SpaceToDepthNode &) {}
-
-void StageGenerator::visit(const model::operation::L2Pool2DNode &) {}
-
-void StageGenerator::visit(const model::operation::EmbeddingLookupNode &) {}
-
-void StageGenerator::visit(const model::operation::L2NormalizationNode &) {}
-
-void StageGenerator::visit(const model::operation::HashtableLookupNode &) {}
-
-void StageGenerator::visit(const model::operation::PReLUNode &) {}
-
-void StageGenerator::visit(const model::operation::TransposeConvNode &) {}
-
-void StageGenerator::visit(const model::operation::SQRTNode &) {}
-
-void StageGenerator::visit(const model::operation::LogicalOrNode &) {}
-
-void StageGenerator::visit(const model::operation::LogicalNotNode &) {}
-
-void StageGenerator::visit(const model::operation::SquaredDifferenceNode &) {}
-
-void StageGenerator::visit(const model::operation::TopKV2Node &) {}
-
-void StageGenerator::visit(const model::operation::GatherNode &) {}
-
-void StageGenerator::visit(const model::operation::NegNode &) {}
-
-void StageGenerator::visit(const model::operation::AbsNode &) {}
-
-void StageGenerator::visit(const model::operation::ArgMaxNode &) {}
-
-void StageGenerator::visit(const model::operation::DequantizeNode &) {}
-
-void StageGenerator::visit(const model::operation::MeanNode &) {}
-
-void StageGenerator::visit(const model::operation::LocalResponseNormalizationNode &) {}
-
-void StageGenerator::visit(const model::operation::DepthToSpaceNode &) {}
-
-void StageGenerator::visit(const model::operation::ReduceMinNode &) {}
-
-void StageGenerator::visit(const model::operation::SplitNode &) {}
-
-void StageGenerator::visit(const model::operation::UnpackNode &) {}
-
-void StageGenerator::visit(const model::operation::PadNode &) {}
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
-#define __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
-
-#include <backend/IStageGenerator.h>
-
-#include "model/Operands.h"
-#include "TensorBuilder.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_cl
-{
-
-class StageGenerator : public IStageGenerator
-{
-public:
- StageGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
-
- std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
-
- void visit(const model::operation::Conv2DNode &) override;
- void visit(const model::operation::DepthwiseConv2DNode &) override;
- void visit(const model::operation::MaxPool2DNode &) override;
- void visit(const model::operation::AvgPool2DNode &) override;
- void visit(const model::operation::ConcatNode &) override;
- void visit(const model::operation::FullyConnectedNode &) override;
- void visit(const model::operation::MulNode &) override;
- void visit(const model::operation::ReduceSumNode &) override;
- void visit(const model::operation::ReshapeNode &) override;
- void visit(const model::operation::SqueezeNode &) override;
- void visit(const model::operation::TanhNode &) override;
- void visit(const model::operation::SoftmaxNode &) override;
- void visit(const model::operation::StridedSliceNode &) override;
- void visit(const model::operation::TransposeNode &) override;
- void visit(const model::operation::AddNode &) override;
- void visit(const model::operation::SubNode &) override;
- void visit(const model::operation::CastNode &) override;
- void visit(const model::operation::DivNode &) override;
- void visit(const model::operation::ExpNode &) override;
- void visit(const model::operation::LogisticNode &) override;
- void visit(const model::operation::ReduceMaxNode &) override;
- void visit(const model::operation::ComparisonNode &) override;
- void visit(const model::operation::LogicalAndNode &) override;
- void visit(const model::operation::LSTMNode &) override;
- void visit(const model::operation::RSQRTNode &) override;
- void visit(const model::operation::ReLUNode &) override;
- void visit(const model::operation::ResizeBilinearNode &) override;
- void visit(const model::operation::ReLU1Node &) override;
- void visit(const model::operation::ReLU6Node &) override;
- void visit(const model::operation::RNNNode &) override;
- void visit(const model::operation::FloorNode &) override;
- void visit(const model::operation::SpaceToDepthNode &) override;
- void visit(const model::operation::L2Pool2DNode &) override;
- void visit(const model::operation::EmbeddingLookupNode &) override;
- void visit(const model::operation::L2NormalizationNode &) override;
- void visit(const model::operation::HashtableLookupNode &) override;
- void visit(const model::operation::PReLUNode &) override;
- void visit(const model::operation::TransposeConvNode &) override;
- void visit(const model::operation::SQRTNode &) override;
- void visit(const model::operation::LogicalOrNode &) override;
- void visit(const model::operation::LogicalNotNode &) override;
- void visit(const model::operation::SquaredDifferenceNode &) override;
- void visit(const model::operation::TopKV2Node &) override;
- void visit(const model::operation::GatherNode &) override;
- void visit(const model::operation::NegNode &) override;
- void visit(const model::operation::AbsNode &) override;
- void visit(const model::operation::ArgMaxNode &) override;
- void visit(const model::operation::DequantizeNode &) override;
- void visit(const model::operation::MeanNode &) override;
- void visit(const model::operation::LocalResponseNormalizationNode &) override;
- void visit(const model::operation::DepthToSpaceNode &) override;
- void visit(const model::operation::ReduceMinNode &) override;
- void visit(const model::operation::SplitNode &) override;
- void visit(const model::operation::UnpackNode &) override;
- void visit(const model::operation::PadNode &) override;
-
-private:
- const neurun::model::Operands &_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace acl_cl
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
#include "Config.h"
#include "KernelGenerator.h"
-#include "StageGenerator.h"
#include "ShapeFixer.h"
#include "MemoryManager.h"
std::shared_ptr<TensorBuilder> tensor_builder =
std::make_shared<TensorBuilder>(createMemoryManager()))
: ::neurun::backend::Backend{std::make_shared<Config>(),
- std::make_shared<StageGenerator>(operand_ctx, tensor_builder),
std::make_shared<KernelGenerator>(operand_ctx, tensor_builder),
std::make_shared<ShapeFixer>(operand_ctx, tensor_builder)}
{
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StageGenerator.h"
-
-#include <arm_compute/runtime/NEON/functions/NESoftmaxLayer.h>
-#include <arm_compute/runtime/NEON/functions/NEArithmeticAddition.h>
-#include <arm_compute/runtime/NEON/functions/NEArithmeticSubtraction.h>
-#include <arm_compute/runtime/NEON/functions/NEPixelWiseMultiplication.h>
-#include <arm_compute/runtime/NEON/functions/NEPoolingLayer.h>
-#include <arm_compute/runtime/NEON/functions/NEActivationLayer.h>
-#include <arm_compute/runtime/NEON/functions/NEConvolutionLayer.h>
-#include <arm_compute/runtime/NEON/functions/NEDepthwiseConvolutionLayer.h>
-#include <arm_compute/runtime/NEON/functions/NEReshapeLayer.h>
-#include <arm_compute/runtime/NEON/functions/NEFullyConnectedLayer.h>
-#include <arm_compute/runtime/NEON/functions/NEFullyConnectedReshapingLayer.h>
-
-#include "kernel/ConcatLayer.h"
-#include "util/Padding.h"
-#include "model/Index.h"
-#include "model/DataType.h"
-#include "model/InternalType.h"
-#include "compiler/IExecutionBuilder.h"
-#include "exec/NopFunction.h"
-#include "util/logging.h"
-#include "util/Utils.h"
-#include "acl_common/Convert.h"
-#include "acl_common/Swizzle.h"
-
-using ::neurun::compiler::IExecutionBuilder;
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-using ::neurun::backend::acl_common::asAclFunction;
-
-//
-// StageGenerator
-//
-StageGenerator::StageGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
- : _ctx(ctx), _tensor_builder(tensor_builder)
-{
- // DO NOTHING
-}
-
-void StageGenerator::visit(const model::operation::Conv2DNode &) {}
-
-void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &) {}
-
-void StageGenerator::visit(const model::operation::MaxPool2DNode &) {}
-
-void StageGenerator::visit(const model::operation::AvgPool2DNode &) {}
-
-void StageGenerator::visit(const model::operation::ConcatNode &) {}
-
-void StageGenerator::visit(const model::operation::FullyConnectedNode &) {}
-
-void StageGenerator::visit(const model::operation::MulNode &) {}
-
-void StageGenerator::visit(const model::operation::ReshapeNode &) {}
-
-void StageGenerator::visit(const model::operation::TanhNode &) {}
-
-void StageGenerator::visit(const model::operation::SoftmaxNode &) {}
-
-void StageGenerator::visit(const model::operation::AddNode &) {}
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace neurun
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ACL_NEON_STAGE_GENERATOR_H__
-#define __NEURUN_BACKEND_ACL_NEON_STAGE_GENERATOR_H__
-
-#include <backend/IStageGenerator.h>
-
-#include "model/Operands.h"
-#include "TensorBuilder.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace acl_neon
-{
-
-class StageGenerator : public IStageGenerator
-{
-public:
- StageGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
-
- std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
-
- void visit(const model::operation::Conv2DNode &) override;
- void visit(const model::operation::DepthwiseConv2DNode &) override;
- void visit(const model::operation::MaxPool2DNode &) override;
- void visit(const model::operation::AvgPool2DNode &) override;
- void visit(const model::operation::ConcatNode &) override;
- void visit(const model::operation::FullyConnectedNode &) override;
- void visit(const model::operation::MulNode &) override;
- void visit(const model::operation::ReshapeNode &) override;
- void visit(const model::operation::TanhNode &) override;
- void visit(const model::operation::SoftmaxNode &) override;
- void visit(const model::operation::AddNode &) override;
-
-private:
- const neurun::model::Operands &_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace acl_neon
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ACL_NEON_STAGE_GENERATOR_H__
#include "Config.h"
#include "KernelGenerator.h"
-#include "StageGenerator.h"
#include "ShapeFixer.h"
namespace neurun
Backend(const neurun::model::Operands &operand_ctx,
std::shared_ptr<TensorBuilder> tensor_builder = std::make_shared<TensorBuilder>())
: ::neurun::backend::Backend{std::make_shared<Config>(),
- std::make_shared<StageGenerator>(operand_ctx, tensor_builder),
std::make_shared<KernelGenerator>(operand_ctx, tensor_builder),
std::make_shared<ShapeFixer>(operand_ctx, tensor_builder)}
{
VERBOSE(CPU_MEMORYMANAGER) << "TENSOR(#" << ind.value() << "): " << static_cast<void *>(buffer)
<< std::endl;
- // If we do not make tensor here currently, stages would cause segmentation fault.
+ // If we do not make tensor here currently, kernel generation would cause segmentation fault.
// See also : Comments in `allocate` method.
}
}
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StageGenerator.h"
-
-#include <stdexcept>
-
-#include "cpp14/memory.h"
-#include "util/Padding.h"
-#include "kernel/OperationUtils.h"
-#include "kernel/ConvolutionLayer.h"
-#include "kernel/AvgPoolLayer.h"
-#include "kernel/MaxPoolLayer.h"
-#include "kernel/ConcatLayer.h"
-#include "kernel/FullyConnectedLayer.h"
-#include "kernel/ReshapeLayer.h"
-#include "kernel/SoftMaxLayer.h"
-#include "kernel/PermuteLayer.h"
-#include "kernel/DepthwiseConvolutionLayer.h"
-#include "kernel/AddLayer.h"
-
-#include <backend/Backend.h>
-#include <backend/IConfig.h>
-#include "compiler/IExecutionBuilder.h"
-
-#include "util/logging.h"
-
-#include "util/Utils.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-StageGenerator::StageGenerator(const neurun::model::Operands &operand_ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder)
- : _ctx(operand_ctx), _tensor_builder(tensor_builder)
-{
- // DO NOTHING
-}
-
-void StageGenerator::visit(const model::operation::Conv2DNode &) {}
-
-void StageGenerator::visit(const model::operation::DepthwiseConv2DNode &) {}
-
-void StageGenerator::visit(const model::operation::MaxPool2DNode &) {}
-
-void StageGenerator::visit(const model::operation::AvgPool2DNode &) {}
-
-void StageGenerator::visit(const model::operation::ConcatNode &) {}
-
-void StageGenerator::visit(const model::operation::FullyConnectedNode &) {}
-
-void StageGenerator::visit(const model::operation::MulNode &) {}
-
-void StageGenerator::visit(const model::operation::ReshapeNode &) {}
-
-void StageGenerator::visit(const model::operation::SoftmaxNode &) {}
-
-void StageGenerator::visit(const model::operation::AddNode &) {}
-
-void StageGenerator::visit(const model::operation::PermuteNode &) {}
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
-#define __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
-
-#include <backend/IStageGenerator.h>
-
-#include "model/Operands.h"
-#include "operand/Tensor.h"
-#include "TensorBuilder.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace cpu
-{
-
-class StageGenerator : public IStageGenerator
-{
-public:
- StageGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
-
- std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
-
- void visit(const model::operation::Conv2DNode &) override;
- void visit(const model::operation::DepthwiseConv2DNode &) override;
- void visit(const model::operation::MaxPool2DNode &) override;
- void visit(const model::operation::AvgPool2DNode &) override;
- void visit(const model::operation::ConcatNode &) override;
- void visit(const model::operation::FullyConnectedNode &) override;
- void visit(const model::operation::MulNode &) override;
- void visit(const model::operation::ReshapeNode &) override;
- void visit(const model::operation::SoftmaxNode &) override;
- void visit(const model::operation::AddNode &) override;
- void visit(const model::operation::PermuteNode &) override;
-
-private:
- const neurun::model::Operands &_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
void TensorBuilder::allocate(void)
{
// NOTE For now nothing to do. Allocation is done in prepare stage, which is not appropriate
- // This is because CPU kernels require `ITensor`s to be allocated before Stage Generation.
+ // This is because CPU kernels require `ITensor`s to be allocated before Kernel Generation.
}
std::shared_ptr<::neurun::backend::operand::ITensor>
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "StageGenerator.h"
-// to force compilation
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef NNFW_STAGEGENERATOR_H
-#define NNFW_STAGEGENERATOR_H
-
-#include <backend/IStageGenerator.h>
-
-#include "model/Operands.h"
-#include "TensorBuilder.h"
-
-namespace neurun
-{
-namespace backend
-{
-namespace hi_perf_cpu
-{
-
-class StageGenerator : public IStageGenerator
-{
-public:
- StageGenerator(const neurun::model::Operands &ctx,
- const std::shared_ptr<TensorBuilder> &tensor_builder);
- std::shared_ptr<ITensorBuilder> tensor_builder() override;
- // TODO add more ops
-
-private:
- const neurun::model::Operands &_ctx;
- std::shared_ptr<TensorBuilder> _tensor_builder;
-};
-
-} // namespace hi_perf_cpu
-} // namespace backend
-} // namespace neurun
-
-#endif // NNFW_STAGEGENERATOR_H
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ATOMIC_STAGE_H__
-#define __NEURUN_BACKEND_ATOMIC_STAGE_H__
-
-#include <vector>
-#include <functional>
-
-#include "IStage.h"
-
-namespace neurun
-{
-namespace backend
-{
-
-class AtomicStage final : public IStage
-{
-public:
- AtomicStage() = default;
- template <typename F> AtomicStage(F f) : fn(f) {}
-
- virtual void operator()(compiler::IExecutionBuilder &execution_builder) const override
- {
- fn(execution_builder);
- }
-
- virtual void operator<<(StageFn f) override { fn = f; }
-
-private:
- StageFn fn = nullptr;
-};
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ATOMIC_STAGE_H__
{
struct IConfig;
-class IStageGenerator;
class IKernelGenerator;
class IShapeFixer;
struct ITensorBuilder;
{
public:
Backend(const std::shared_ptr<neurun::backend::IConfig> &backend_config,
- const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen,
const std::shared_ptr<neurun::backend::IKernelGenerator> &kernel_gen,
const std::shared_ptr<neurun::backend::IShapeFixer> &shape_fixer);
- Backend(void) : _config(nullptr), _stage_gen(nullptr), _kernel_gen(nullptr), _shape_fixer(nullptr)
+ Backend(void) : _config(nullptr), _kernel_gen(nullptr), _shape_fixer(nullptr)
{
// DO NOTHING
}
public:
const std::shared_ptr<neurun::backend::IConfig> config() const;
- const std::shared_ptr<neurun::backend::IStageGenerator> stage_gen() const;
const std::shared_ptr<neurun::backend::IKernelGenerator> kernel_gen() const;
const std::shared_ptr<neurun::backend::IShapeFixer> shape_fixer() const;
const std::shared_ptr<neurun::backend::ITensorBuilder> tensor_builder() const;
private:
std::shared_ptr<neurun::backend::IConfig> _config;
- std::shared_ptr<neurun::backend::IStageGenerator> _stage_gen;
std::shared_ptr<neurun::backend::IKernelGenerator> _kernel_gen;
std::shared_ptr<neurun::backend::IShapeFixer> _shape_fixer;
};
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ISTAGE_H__
-#define __NEURUN_BACKEND_ISTAGE_H__
-
-#include <functional>
-
-namespace neurun
-{
-namespace compiler
-{
-struct IExecutionBuilder;
-} // namespace compiler
-} // namespace neurun
-
-namespace neurun
-{
-namespace backend
-{
-
-using StageFn = std::function<void(compiler::IExecutionBuilder &)>;
-
-struct IStage
-{
- IStage() = default;
- virtual ~IStage() = default;
-
- virtual void operator()(compiler::IExecutionBuilder &execution_builder) const = 0;
- virtual void operator<<(StageFn f) = 0;
-};
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ISTAGE_H__
+++ /dev/null
-/*
- * Copyright (c) 2018 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_ISTAGE_GENERATOR_H__
-#define __NEURUN_BACKEND_ISTAGE_GENERATOR_H__
-
-#include <memory>
-#include <functional>
-
-#include "ITensorBuilder.h"
-#include "IStage.h"
-#include "model/OperationVisitor.h"
-#include "model/Subgraph.h"
-#include "cpp14/memory.h"
-
-// TODO Remove dependencies for below header. Should include only interface.
-#include "backend/StageSequence.h"
-
-namespace neurun
-{
-namespace backend
-{
-
-class IStageGenerator : model::OperationVisitor
-{
-public:
- virtual ~IStageGenerator() = default;
-
- virtual std::shared_ptr<ITensorBuilder> tensor_builder() = 0;
-
-protected:
-#define OP(InternalName, IsNnApi) \
- virtual void visit(const model::operation::InternalName &) override \
- { \
- throw std::runtime_error("NYI"); \
- }
-#include "model/Operations.lst"
-#undef OP
-
-protected:
- void returnStage(const StageFn fn)
- {
- assert(_return);
- *_return << fn;
- }
-
-public:
- std::unique_ptr<IStage> generate(const model::Operation &node)
- {
- // TODO Remove directly dependency for classes not interface
- _return = nnfw::cpp14::make_unique<StageSequence>();
- node.accept(*this);
- return std::move(_return);
- }
-
-private:
- std::unique_ptr<IStage> _return = nullptr;
-};
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_ISTAGE_GENERATOR_H__
+++ /dev/null
-/*
- * Copyright (c) 2019 Samsung Electronics Co., Ltd. All Rights Reserved
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef __NEURUN_BACKEND_STAGE_SEQUENCE_H__
-#define __NEURUN_BACKEND_STAGE_SEQUENCE_H__
-
-#include <vector>
-#include <functional>
-
-#include "IStage.h"
-#include "backend/AtomicStage.h"
-#include "cpp14/memory.h"
-
-namespace neurun
-{
-namespace backend
-{
-
-class StageSequence final : public IStage
-{
-public:
- StageSequence() = default;
- template <typename F> StageSequence(F f)
- {
- stages.emplace_back(nnfw::cpp14::make_unique<AtomicStage>(f));
- }
-
- virtual void operator()(compiler::IExecutionBuilder &execution_builder) const override
- {
- for (const auto &stage : stages)
- (*stage)(execution_builder);
- }
-
- virtual void operator<<(StageFn f) override
- {
- stages.emplace_back(nnfw::cpp14::make_unique<AtomicStage>(f));
- }
-
-private:
- std::vector<std::unique_ptr<IStage>> stages;
-};
-
-} // namespace backend
-} // namespace neurun
-
-#endif // __NEURUN_BACKEND_STAGE_SEQUENCE_H__
#include "backend/IConfig.h"
#include "backend/ITensorBuilder.h"
-#include "backend/IStageGenerator.h"
#include "backend/IKernelGenerator.h"
#include "backend/IShapeFixer.h"
{
Backend::Backend(const std::shared_ptr<neurun::backend::IConfig> &backend_config,
- const std::shared_ptr<neurun::backend::IStageGenerator> &stage_gen,
const std::shared_ptr<neurun::backend::IKernelGenerator> &kernel_gen,
const std::shared_ptr<neurun::backend::IShapeFixer> &shape_fixer)
- : _config(backend_config), _stage_gen(stage_gen), _kernel_gen(kernel_gen),
- _shape_fixer(shape_fixer)
+ : _config(backend_config), _kernel_gen(kernel_gen), _shape_fixer(shape_fixer)
{
backend_config->initialize();
}
const std::shared_ptr<neurun::backend::IConfig> Backend::config() const { return _config; }
-const std::shared_ptr<neurun::backend::IStageGenerator> Backend::stage_gen() const
-{
- return _stage_gen;
-}
-
const std::shared_ptr<neurun::backend::IKernelGenerator> Backend::kernel_gen() const
{
return _kernel_gen;
const std::shared_ptr<neurun::backend::ITensorBuilder> Backend::tensor_builder() const
{
- return _stage_gen->tensor_builder();
+ return _shape_fixer->tensor_builder();
}
} // namespace backend
* @brief load backend plugin
*
* @param backend backend to be loaded
- * @param operands operands to construct StageGenerator
+ * @param operands operands used to construct ShapeFixer and KernelGenerator
*
* @return
*/
#include "util/logging.h"
#include "util/ConfigSource.h"
#include "backend/BackendManager.h"
-#include "backend/IStageGenerator.h"
namespace neurun
{
* Backend dependent analysis & optimization phase
*************************************************/
- // SubTensorInfo should be generated after lower, before stage generation and finalize
+ // SubTensorInfo should be generated after lower, before shape correction and finalize
// because SubTensorAnalyzer assume that insert permutation is already finished
// lower: decide backend and insert permutation
// fix shapes: prepare codegen to optimization
exec::IExecutor *ExecutorFactory::createDataflowExecutor(graph::Graph &graph, bool parallel)
{
auto operand_context = std::make_shared<OperandContext>();
- std::unordered_map<model::SubgraphIndex, std::unique_ptr<backend::IStage>> stages;
graph.subg_ctx().iterate([&](const model::SubgraphIndex &, const model::Subgraph &subg) {
auto subtensor_analyzer = SubTensorAnalyzer{graph.operands()};
auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>();
- // Generate and process stages, generate kernels
- // TODO: when code will be moved from StageGenerator to KernelGenerator - remove StageGenerator
- // related code
+ // Generate kernels
graph.subg_ctx().iterate(
[&](const model::SubgraphIndex &subg_index, const model::Subgraph &subg) {
auto backend = graph.getLowerInfo(subg_index)->backend();
// TODO This approach is temporal. See declaration of `setNextIndex`.
execution_builder->setNextIndex(subg_index);
-
- auto stage_gen = backend->stage_gen();
- (*stage_gen->generate(subg))(*execution_builder);
-
auto kernel_gen = backend->kernel_gen();
kernel_gen->generate(subg, execution_builder.get());
});
*/
#include "ManualScheduler.h"
+#include "model/Operations.Include.h"
#include "backend/Backend.h"
#include "backend/BackendManager.h"
#include "backend/IConfig.h"
#include "PlanBuilder.h"
#include "backend/operand/IObject.h"
-#include "linear/Linear.h"
#include "backend/Backend.h"
#include "backend/IKernelGenerator.h"
+#include "linear/Linear.h"
namespace neurun
{
namespace compiler
{
-void PlanBuilder::addStage(std::unique_ptr<backend::IStage> stage)
-{
- _stages.emplace_back(std::move(stage));
-}
-
void PlanBuilder::finalize(const linear::Linear *linear,
const backend::TensorBuilderSet &tensor_builders)
{
});
}
- // Generate and process stages and generate kernels
- // TODO: when code will be moved from StageGenerator to KernelGenerator - remove StageGenerator
- // related code
+ // Generate kernels
auto execution_builder = nnfw::cpp14::make_unique<ExecutionBuilder>(_operations);
linear->iterate([&](const linear::Element &element) {
auto backend = element.lower_info->backend();
-
- auto stage_gen = backend->stage_gen();
- (*stage_gen->generate(*element.subgraph))(*execution_builder);
-
auto kernel_gen = backend->kernel_gen();
kernel_gen->generate(*element.subgraph, execution_builder.get());
});
#include "compiler/OperandContext.h"
#include "compiler/operation/Sequence.h"
#include "compiler/IExecutionBuilder.h"
-#include "backend/IStageGenerator.h"
+#include "backend/IShapeFixer.h"
#include "backend/ITensorBuilder.h"
-#include "backend/IStage.h"
#include "linear/Linear.h"
namespace neurun
}
public:
- void addStage(std::unique_ptr<backend::IStage> stage);
-
-public:
// TODO Remove the argument `tensor_builders`
void finalize(const linear::Linear *linear, const backend::TensorBuilderSet &tensor_builders);
private:
OperandContext &_operands;
operation::Sequence &_operations;
-
-private:
- std::vector<std::unique_ptr<backend::IStage>> _stages;
};
} // namespace compiler
#include "compiler/IExecutionBuilder.h"
#include "compiler/BackendResolver.h"
#include "backend/IConfig.h"
+#include "backend/IShapeFixer.h"
#include "util/logging.h"
#include "exec/FunctionSequence.h"
#include <cassert>
}
try
{
- auto _ = backend->stage_gen()->generate(node);
+ backend->shape_fixer()->fix(node);
// always prefer the one, that is supported
_run_cache[backend][node.getName()] = 1;
}
#include "graph/operation/LowerInfo.h"
#include "graph/operand/LowerInfo.h"
-#include "backend/IStageGenerator.h"
+#include "backend/IShapeFixer.h"
#include "backend/IConfig.h"
#include "backend/Backend.h"
#include "compiler/SubTensorInfo.h"
#include "backend/ExecTime.h"
#include "backend/IConfig.h"
-#include "backend/IStageGenerator.h"
#include "backend/IKernelGenerator.h"
#include "backend/IShapeFixer.h"
#include "backend/Backend.h"
bool SupportSubTensorAlloc() override { return false; }
};
-struct MockStageGenerator : IStageGenerator
-{
- std::shared_ptr<ITensorBuilder> tensor_builder() final { return nullptr; }
-};
-
struct MockKernelGenerator : IKernelGenerator
{
std::shared_ptr<ITensorBuilder> tensor_builder() final { return nullptr; }
* Required because we use pointers to backends instead of string identifiers.
*/
MockBackend()
- : Backend{std::make_shared<MockConfig>(), std::make_shared<MockStageGenerator>(),
- std::make_shared<MockKernelGenerator>(), std::make_shared<MockShapeFixer>()} {};
+ : Backend{std::make_shared<MockConfig>(), std::make_shared<MockKernelGenerator>(),
+ std::make_shared<MockShapeFixer>()} {};
};
TEST(ExecTime, roundtrip_ok)