Change namespace `internal::arm_compute` to `neurun::backend::acl_cl` for the ACL CL backend (InitializerGenerator, StageGenerator, TensorBuilder). Symbols that remain in `internal` (`Stride`, `Padding`, `same_padding`, `valid_padding`, `ITensorBuilder`) are now explicitly qualified with `::internal::`, header guards are renamed to match the new namespace, a missing `#include "internal/arm_compute.h"` is added to TensorBuilder.h, and the backend-registration call sites are updated to use the new namespace.
Signed-off-by: Hanjoung Lee <hanjoung.lee@samsung.com>
#include "internal/nnapi/kernel/Reader.h"
#include "util/kernel/IndexIterator.h"
-namespace internal
+namespace neurun
{
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
{
InitializerGenerator::InitializerGenerator(const ::internal::tflite::operand::Set &ctx) : _ctx(ctx)
};
}
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
-#ifndef __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
-#define __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
#include "internal/IInitializerGenerator.h"
#include "internal/Model.h"
-namespace internal
+namespace neurun
{
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
{
class InitializerGenerator : public ::internal::IInitializerGenerator
const ::internal::tflite::operand::Set &_ctx;
};
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
-#endif // __INTERNAL_ARM_COMPUTE_INITIALIZER_GENERATOR_H__
+#endif // __NEURUN_BACKEND_ACL_CL_INITIALIZER_GENERATOR_H__
::arm_compute::DimensionRoundingType::FLOOR};
}
-namespace internal
+namespace neurun
{
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
{
//
//
StageGenerator::StageGenerator(
const ::internal::tflite::operand::Set &ctx,
- const std::shared_ptr<::internal::arm_compute::TensorBuilder> &tensor_builder,
+ const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder)
: _ctx(ctx), _tensor_builder(tensor_builder), _common_tensor_builder(common_tensor_builder)
{
assert((ANEURALNETWORKS_PADDING_SAME == padding_type) ||
(ANEURALNETWORKS_PADDING_VALID == padding_type));
- Stride stride;
+ ::internal::Stride stride;
stride.vertical = _ctx.at(vstride_index).asScalar<int32_t>();
stride.horizontal = _ctx.at(hstride_index).asScalar<int32_t>();
int ker_index;
int bias_index;
- Padding padding;
- Stride stride;
+ ::internal::Padding padding;
+ ::internal::Stride stride;
FuseCode activation;
};
param.bias_index = bias_index.asInt();
param.stride = stride;
- param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
- : valid_padding();
+ param.padding =
+ (padding_type == ANEURALNETWORKS_PADDING_SAME)
+ ? ::internal::same_padding(ifm_shape, ofm_shape, stride, ker_shape.W, ker_shape.H)
+ : ::internal::valid_padding();
param.activation = static_cast<FuseCode>(_ctx.at(activation_index).asScalar<int32_t>());
uint32_t kw;
uint32_t kh;
- Padding padding;
- Stride stride;
+ ::internal::Padding padding;
+ ::internal::Stride stride;
// TODO Add 'activation' field
};
param.stride.horizontal = hstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
- : valid_padding();
+ ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+ : ::internal::valid_padding();
VERBOSE(MaxPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
VERBOSE(MaxPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
uint32_t kw;
uint32_t kh;
- Padding padding;
- Stride stride;
+ ::internal::Padding padding;
+ ::internal::Stride stride;
// TODO Add 'activation' field
};
param.stride.horizontal = hstride;
param.padding = (padding_type == ANEURALNETWORKS_PADDING_SAME)
- ? same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
- : valid_padding();
+ ? ::internal::same_padding(ifm_shape, ofm_shape, param.stride, kw, kh)
+ : ::internal::valid_padding();
VERBOSE(AvgPool2D) << "IFM_H: " << ifm_shape.H << std::endl;
VERBOSE(AvgPool2D) << "IFM_W: " << ifm_shape.W << std::endl;
throw std::runtime_error("NYI - StageGenerator::generate(TensorConvert::AclToCommon)");
}
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
-#ifndef __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
-#define __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
+#define __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
#include "internal/IStageGenerator.h"
#include "backend/acl_cl/TensorBuilder.h"
#include "internal/common/TensorBuilder.h"
-namespace internal
+namespace neurun
{
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
{
class StageGenerator : public ::internal::IStageGenerator
{
public:
StageGenerator(const ::internal::tflite::operand::Set &ctx,
- const std::shared_ptr<::internal::arm_compute::TensorBuilder> &tensor_builder,
+ const std::shared_ptr<TensorBuilder> &tensor_builder,
const std::shared_ptr<::internal::common::TensorBuilder> &common_tensor_builder);
- virtual std::shared_ptr<ITensorBuilder> tensor_builder() override { return _tensor_builder; }
+ virtual std::shared_ptr<::internal::ITensorBuilder> tensor_builder() override
+ {
+ return _tensor_builder;
+ }
virtual Stage generate(const ::internal::tflite::op::Conv2D::implicit::Node &node) override;
virtual Stage generate(const ::internal::tflite::op::MaxPool2D::implicit::Node &node) override;
private:
const ::internal::tflite::operand::Set &_ctx;
- std::shared_ptr<::internal::arm_compute::TensorBuilder> _tensor_builder;
+ std::shared_ptr<TensorBuilder> _tensor_builder;
std::shared_ptr<::internal::common::TensorBuilder> _common_tensor_builder;
};
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
-#endif // __INTERNAL_ARM_COMPUTE_STAGE_GENERATOR_H__
+#endif // __NEURUN_BACKEND_ACL_CL_STAGE_GENERATOR_H__
#include "internal/arm_compute.h"
-namespace internal
+namespace neurun
{
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
{
TensorBuilder::TensorBuilder(::internal::arm_compute::Plan &plan) : _plan(plan)
return _tensors.at(ind.asInt());
}
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
-#ifndef __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
-#define __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
+#ifndef __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
+#define __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
#include "internal/ITensorBuilder.h"
+#include "internal/arm_compute.h"
#include <unordered_map>
#include <unordered_set>
#include <arm_compute/runtime/CL/CLTensor.h>
-namespace internal
+namespace neurun
{
-namespace arm_compute
+namespace backend
+{
+namespace acl_cl
{
class Plan;
std::unordered_map<int, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
};
-} // namespace arm_compute
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
-#endif // __INTERNAL_ARM_COMPUTE_TENSOR_BUILDER_H__
+#endif // __NEURUN_BACKEND_ACL_CL_TENSOR_BUILDER_H__
// Add arm_compute backend
{
- auto acl_tensor_builder = std::make_shared<::internal::arm_compute::TensorBuilder>(_plan);
- auto acl_initializer_gen =
- std::make_shared<::internal::arm_compute::InitializerGenerator>(operands);
- auto acl_stage_gen = std::make_shared<::internal::arm_compute::StageGenerator>(
- operands, acl_tensor_builder, _common_tensor_builder);
+ using namespace ::neurun::backend::acl_cl;
+ auto acl_tensor_builder = std::make_shared<TensorBuilder>(_plan);
+ auto acl_initializer_gen = std::make_shared<InitializerGenerator>(operands);
+ auto acl_stage_gen =
+ std::make_shared<StageGenerator>(operands, acl_tensor_builder, _common_tensor_builder);
// TODO Do not use magic string for backend id
_gen_map["acl_cl"] = {acl_initializer_gen, acl_stage_gen};