#include "logging.h"
-namespace internal
+namespace neurun
+{
+namespace backend
{
namespace cpu
{
StageGenerator::StageGenerator(
const ::internal::tflite::operand::Set &operand_ctx,
- const std::shared_ptr<::internal::cpu::TensorBuilder> &tensor_builder,
+ const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
: _ctx(operand_ctx), _tensor_builder(tensor_builder),
_common_tensor_builder(common_tensor_builder)
assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
(ANEURALNETWORKS_PADDING_VALID == padding_type));
- Stride stride;
+ ::internal::Stride stride;
stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
::internal::tflite::operand::Shape ker_shape{1};
::internal::tflite::operand::Shape bias_shape{1};
- Padding padding;
- Stride stride;
+ ::internal::Padding padding;
+ ::internal::Stride stride;
FuseCode activation;
};
param.bias_shape = _ctx.at(bias_index).shape();
param.stride = stride;
- param.padding =
- (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(param.ifm_shape.asFeature(), param.ofm_shape.asFeature(), stride,
- param.ker_shape.asKernel().W, param.ker_shape.asKernel().H)
- : valid_padding();
+ param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(
+ param.ifm_shape.asFeature(), param.ofm_shape.asFeature(), stride,
+ param.ker_shape.asKernel().W, param.ker_shape.asKernel().H)
+ : ::internal::valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
::internal::tflite::operand::Shape ofm_shape{1};
::internal::tflite::operand::Shape ifm_shape{1};
- Padding padding;
- Stride stride;
+ ::internal::Padding padding;
+ ::internal::Stride stride;
FuseCode activation;
};
param.stride.horizontal = hstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(param.ifm_shape.asFeature(), param.ofm_shape.asFeature(),
- param.stride, kw, kh)
- : valid_padding();
+ ? ::internal::same_padding(param.ifm_shape.asFeature(),
+ param.ofm_shape.asFeature(), param.stride, kw, kh)
+ : ::internal::valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
::internal::tflite::operand::Shape ofm_shape{1};
::internal::tflite::operand::Shape ifm_shape{1};
- Padding padding;
- Stride stride;
+ ::internal::Padding padding;
+ ::internal::Stride stride;
FuseCode activation;
};
param.stride.horizontal = hstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(param.ifm_shape.asFeature(), param.ofm_shape.asFeature(),
- param.stride, kw, kh)
- : valid_padding();
+ ? ::internal::same_padding(param.ifm_shape.asFeature(),
+ param.ofm_shape.asFeature(), param.stride, kw, kh)
+ : ::internal::valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
throw std::runtime_error("Wrong Approach");
}
-} // namespace stage
-} // namespace internal
+} // namespace cpu
+} // namespace backend
+} // namespace neurun
-#ifndef __INTERNAL_CPU_STAGE_GENERATOR_H__
-#define __INTERNAL_CPU_STAGE_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
+#define __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
#include "internal/IStageGenerator.h"
#include "internal/common/TensorBuilder.h"
-namespace internal
+namespace neurun
+{
+namespace backend
{
namespace cpu
{
{
public:
StageGenerator(const ::internal::tflite::operand::Set &ctx,
- const std::shared_ptr<::internal::cpu::TensorBuilder> &tensor_builder,
+ const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
- virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+ virtual std::shared_ptr<::internal::ITensorBuilder> tensor_builder() override
+ {
+ return _tensor_builder;
+ }
virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
private:
const ::internal::tflite::operand::Set &_ctx;
- std::shared_ptr<::internal::cpu::TensorBuilder> _tensor_builder;
+ std::shared_ptr<TensorBuilder> _tensor_builder;
std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
};
} // namespace cpu
-} // namespace internal
+} // namespace backend
+} // namespace neurun
-#endif // __INTERNAL_CPU_STAGE_GENERATOR_H__
+#endif // __NEURUN_BACKEND_CPU_STAGE_GENERATOR_H__
// Add CPU backend
{
- auto cpu_tensor_builder = std::make_shared<::internal::cpu::TensorBuilder>(_plan);
- auto cpu_initializer_gen = std::make_shared<::internal::cpu::InitializerGenerator>(operands);
- auto cpu_stage_gen = std::make_shared<::internal::cpu::StageGenerator>(
- operands, cpu_tensor_builder, _common_tensor_builder);
+ using namespace ::neurun::backend::cpu;
+ auto cpu_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+ auto cpu_initializer_gen = std::make_shared<InitializerGenerator>(operands);
+ auto cpu_stage_gen =
+ std::make_shared<StageGenerator>(operands, cpu_tensor_builder, _common_tensor_builder);
// TODO Do not use magic string for backend id
_gen_map["cpu"] = {cpu_initializer_gen, cpu_stage_gen};