#include "internal/Swizzle.h"
#include "graph/operand/DataType.h"
-namespace internal
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
{
::arm_compute::TensorShape asTensorShape(const ::neurun::graph::operand::Shape &shape,
// However, if dimension correction is applied to input_to_input_weights with input_size
// equal to 1, the shape is collapsed to 1-D.
// In that case input_to_input_weights can no longer be used as the weight of FullyConnected.
- res.set(ToARMComputeAxis(rank, axis).value(), shape.dim(axis), apply_dim_correction);
+ res.set(::internal::ToARMComputeAxis(rank, axis).value(), shape.dim(axis),
+ apply_dim_correction);
}
return res;
asQuantizationInfo(typeInfo.scale(), typeInfo.offset()));
}
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
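A minimal sketch of the behaviour the comment above guards against with the `apply_dim_correction` flag passed to `TensorShape::set`. It assumes stock Arm Compute Library semantics of the time; the concrete dimension values are illustrative only and do not come from this patch.

#include <arm_compute/core/TensorShape.h>
#include <cassert>

void dim_correction_sketch()
{
  // A 2-D weight such as LSTM's input_to_input_weights with input_size == 1.
  ::arm_compute::TensorShape corrected;
  corrected.set(0, 4, /*apply_dim_correction=*/true); // e.g. num_units
  corrected.set(1, 1, /*apply_dim_correction=*/true); // trailing size-1 dim is stripped
  assert(corrected.num_dimensions() == 1);            // collapsed to 1-D

  ::arm_compute::TensorShape uncorrected;
  uncorrected.set(0, 4, /*apply_dim_correction=*/false);
  uncorrected.set(1, 1, /*apply_dim_correction=*/false);
  assert(uncorrected.num_dimensions() == 2);          // stays 2-D, usable as an FC weight
}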
#include "util/feature/Shape.h"
#include "util/kernel/Shape.h"
-namespace internal
+namespace neurun
+{
+namespace backend
+{
+namespace acl_cl
{
::arm_compute::TensorShape asTensorShape(const ::neurun::graph::operand::Shape &shape,
::arm_compute::TensorInfo asTensorInfo(const ::neurun::graph::operand::Shape &shape,
const ::neurun::graph::operand::TypeInfo &typeInfo);
-} // namespace internal
+} // namespace acl_cl
+} // namespace backend
+} // namespace neurun
#endif // __INTERNAL_CONVERT_H__
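A hedged usage sketch of the relocated declarations: code outside the acl_cl backend now qualifies the helpers with the new namespace instead of `::internal`. The wrapper function below is illustrative and not part of the patch; only the `asTensorInfo` call mirrors the header above.

#include "backend/acl_cl/Convert.h"

::arm_compute::TensorInfo to_acl_tensor_info(const ::neurun::graph::operand::Shape &shape,
                                             const ::neurun::graph::operand::TypeInfo &type_info)
{
  return ::neurun::backend::acl_cl::asTensorInfo(shape, type_info);
}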
#include <stack>
#include "operand/Object.h"
-#include "internal/Convert.h"
+#include "Convert.h"
#include "logging.h"
// Child's type should be the same as its parent's
assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
- assert(::internal::asDataType(info.type().type()) == parent_tensor->info()->data_type());
- auto shape = ::internal::asTensorShape(info.shape());
+ assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
+ auto shape = asTensorShape(info.shape());
// Only axis 3 (channel) is supported
::arm_compute::Coordinates coordinates;
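The coordinate handling above only offsets the channel axis. A sketch of how such a view could be expressed with ACL's CLSubTensor; the function, its parameters, and the `acl_channel_axis` argument are assumptions for illustration, not code from this patch.

#include <arm_compute/core/CL/ICLTensor.h>
#include <arm_compute/core/Coordinates.h>
#include <arm_compute/core/TensorShape.h>
#include <arm_compute/runtime/CL/CLSubTensor.h>
#include <memory>

// `acl_channel_axis` stands in for whatever ToARMComputeAxis(rank, 3) yields for this rank.
std::shared_ptr<::arm_compute::CLSubTensor>
make_channel_subtensor(::arm_compute::ICLTensor *parent,
                       const ::arm_compute::TensorShape &sub_shape,
                       size_t acl_channel_axis, int channel_offset)
{
  ::arm_compute::Coordinates coordinates;             // every offset defaults to 0
  coordinates.set(acl_channel_axis, channel_offset);  // shift only along the channel axis
  return std::make_shared<::arm_compute::CLSubTensor>(parent, sub_shape, coordinates);
}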
#include "CLTensor.h"
+#include "backend/acl_cl/Convert.h"
+
namespace neurun
{
namespace backend
CLTensor::CLTensor(const compiler::TensorInfo &info)
: _cl_tensor(std::make_shared<arm_compute::CLTensor>())
{
- auto acl_cl_info = ::internal::asTensorInfo(info.shape(), info.typeInfo());
+ auto acl_cl_info = asTensorInfo(info.shape(), info.typeInfo());
allocator()->init(acl_cl_info);
}
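A hedged sketch of how the updated constructor is exercised. The CLTensor header path and namespace, the public visibility of allocator(), and the way compiler::TensorInfo is obtained are assumptions based on the surrounding code, not shown in this patch.

#include "backend/acl_cl/operand/CLTensor.h" // assumed header location
#include "compiler/TensorInfo.h"

void allocate_cl_tensor(const ::neurun::compiler::TensorInfo &info)
{
  // The constructor above builds the ACL TensorInfo via asTensorInfo(info.shape(), info.typeInfo())
  // and passes it to allocator()->init(); allocate() then creates the backing CL buffer.
  ::neurun::backend::acl_cl::operand::CLTensor tensor{info}; // namespace path assumed
  tensor.allocator()->allocate();
}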
#include <arm_compute/runtime/CL/CLScheduler.h>
#include "arm_compute/runtime/CL/CLTensorAllocator.h"
#include "ICLTensor.h"
-#include "internal/Convert.h"
#include "compiler/TensorInfo.h"
namespace neurun
#include "operand/Object.h"
#include "logging.h"
-#include "internal/Convert.h"
-
namespace neurun
{
namespace backend
#include <typeinfo>
-#include "internal/Convert.h"
#include "graph/operand/Set.h"
#include "graph/operation/LowerInfo.h"
#include <typeinfo>
#include "nnfw/std/memory.h"
-#include "internal/Convert.h"
#include "graph/operand/Set.h"
#include "graph/operation/LowerInfo.h"
#include "logging.h"
#include "graph/operand/Index.h"
#include "kernel/cpu/PermuteLayer.h"
#include "backend/cpu/operand/Tensor.h"
-#include "internal/Convert.h"
#include "graph/operand/Layout.h"
#include "backend/BackendManager.h"
#include "backend/interface/IConfig.h"
#include "graph/operation/LowerInfo.h"
#include "backend/interface/IStageGenerator.h"
-#include "internal/Convert.h"
#include "backend/interface/IConfig.h"
#include "compiler/SubTensorInfo.h"
#include "compiler/TensorInfo.h"