#include "backend/acl_cl/TensorBuilder.h"
#include <cassert>
+#include <stack>
#include "operand/Object.h"
+#include "internal/Convert.h"
#include "logging.h"
tensor->allocator()->init(info);
_tensors[ind] = tensor;
}
+
+ // To make a subtensor, its parent tensor must be made first.
+ // We enforce that ordering with a stack:
+ // 1) Push one subtensor index onto the stack (iterate over all subtensors)
+ // 2) If the tensor at the stack top is already made, pop it and go to 4)
+ // 3) Otherwise, check the top tensor's parent:
+ // 3-1) If the parent is already made, we can make the child tensor:
+ //      make it, pop it, and go to 4)
+ // 3-2) If the parent is not made yet, we cannot make the child yet:
+ //      push the parent's index onto the stack and go to 4)
+ // 4) If the stack is empty, continue from 1); otherwise go back to 2)
+ // Build a CLSubTensor for every entry in _subtensor_info_map, resolving
+ // parent-before-child ordering with an explicit stack (see steps above).
+ for (auto &entry : _subtensor_info_map)
+ {
+ graph::operand::Index ind = entry.first;
+
+ std::stack<graph::operand::Index> stack;
+ stack.push(ind);
+
+ while (!stack.empty())
+ {
+ // Inspect the index on top of the stack; it stays there until its
+ // parent tensor exists.
+ const auto current = stack.top();
+ const auto &info = _subtensor_info_map.at(current);
+
+ // Already generated CLSubTensor
+ if (_subtensors.find(current) != _subtensors.end())
+ {
+ stack.pop();
+ continue;
+ }
+
+ auto parent = info.parent();
+ std::shared_ptr<::arm_compute::ICLTensor> parent_tensor;
+
+ if (_tensors.find(parent) != _tensors.end())
+ {
+ // Parent is allocated as tensor
+ parent_tensor = _tensors[parent];
+ }
+ else if (_subtensors.find(parent) != _subtensors.end())
+ {
+ // Parent is allocated as subtensor
+ parent_tensor = _subtensors[parent];
+ }
+ else
+ {
+ // Cannot find allocated parent tensor: allocate parent first
+ // (the parent must itself be a registered subtensor, else the
+ // dependency can never be satisfied)
+ assert(_subtensor_info_map.find(parent) != _subtensor_info_map.end());
+ stack.push(parent);
+ continue;
+ }
+ assert(parent_tensor != nullptr);
+
+ // Child's type should be same with parent
+ assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
+ assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
+ assert(::internal::asDataType(info.type().type()) == parent_tensor->info()->data_type());
+ auto shape = ::internal::asTensorShape(info.shape());
+
+ // Only support axis: 3 (channel)
+ // The n/h/w offsets must be zero; only a channel offset is accepted.
+ // NOTE(review): coordinates index 2 presumably corresponds to the channel
+ // axis after the neurun->ACL dimension mapping in asTensorShape — confirm.
+ ::arm_compute::Coordinates coordinates;
+ coordinates.set_num_dimensions(4);
+ assert(info.offset().h() == 0);
+ assert(info.offset().n() == 0);
+ assert(info.offset().w() == 0);
+ coordinates[2] = info.offset().c();
+ // NOTE(review): the trailing 'true' looks like CLSubTensor's extend_parent
+ // flag — confirm against the ARM Compute Library API.
+ auto tensor = std::make_shared<::arm_compute::CLSubTensor>(parent_tensor.get(), shape,
+                                                                 coordinates, true);
+ _subtensors[current] = tensor;
+ stack.pop();
+ }
+ }
}
void TensorBuilder::allocate(void)
#define __INTERNAL_CONVERT_H__
#include <arm_compute/core/TensorInfo.h>
+#include <arm_compute/core/SubTensorInfo.h>
#include <arm_compute/core/TensorShape.h>
+#include "graph/operand/Object.h"
#include "graph/operand/Shape.h"
#include "graph/operand/TypeInfo.h"
#include "util/feature/Shape.h"
namespace internal
{
+// Convert a neurun operand shape into an ACL TensorShape.
+// NOTE(review): the dimension-order mapping and the exact meaning of
+// apply_dim_correction live in the definition (Convert.cc) — confirm there.
+::arm_compute::TensorShape asTensorShape(const ::neurun::graph::operand::Shape &shape,
+                                         bool apply_dim_correction = true);
+// Convert a neurun operand data type into the corresponding ACL DataType.
+::arm_compute::DataType asDataType(const ::neurun::graph::operand::DataType &type);
::arm_compute::TensorInfo asTensorInfo(const ::neurun::graph::operand::Shape &shape,
const ::neurun::graph::operand::TypeInfo &typeInfo);