void dimCorrection(const model::OperandIndex &index, bool apply_dim_correction);
private:
+ void buildTensors(void);
+ void buildSubtensors(void);
+
+private:
model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
model::OperandIndexMap<compiler::SubTensorInfo> _subtensor_info_map;
model::OperandIndexMap<bool> _apply_dim_correction_map;
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::prepare(void)
{
- assert(_mem_mgr->tensors().size() == 0);
-
- // TODO Handle SubTensor(subsumption)
- // Currently this TemplTensorBuilder does not have subsumption info yet
- // Allocated subtensor will be mapped to _subtensors instead of _tensors
- assert(_mem_mgr->subtensors().size() == 0);
-
- for (auto &entry : _tensor_info_map)
- {
- auto ind = entry.first;
- const auto &info = entry.second;
- auto tensor_info =
- asTensorInfo(info.shape(), info.typeInfo(), _layout, _apply_dim_correction_map[ind]);
- _mem_mgr->buildTensor(ind, tensor_info);
- }
-
- // To make subtensor, parent tensor must be made first
- // For this condition, use stack
- // 1) Push one subtensor index to stack (iterate subtensors)
- // 2) If tensor at stack top is already made, pop and go to 4)
- // 3) If tensor pushed at 1) is not made, check parent tensor
- // 3-1) If parent tensor is already made, we can make child tensor
- // Make child tensor and pop, go to 4)
- // 3-2) If parent tensor is not made, we can't make child tensor yet
- // Push parent tensor index to stack and return to 4)
- // 4) If stack is empty, return to 1), else return to 2)
- auto &tensors = _mem_mgr->tensors();
- auto &subtensors = _mem_mgr->subtensors();
- for (auto &entry : _subtensor_info_map)
- {
- model::OperandIndex ind = entry.first;
-
- std::stack<model::OperandIndex> stack;
- stack.push(ind);
-
- while (!stack.empty())
- {
- const auto current = stack.top();
- const auto &info = _subtensor_info_map.at(current);
-
- // Already generated SubTensor
- if (subtensors.find(current) != subtensors.end())
- {
- stack.pop();
- continue;
- }
-
- auto parent = info.parent();
- std::shared_ptr<T_ITensor> parent_tensor;
-
- if (tensors.find(parent) != tensors.end())
- {
- // Parent is allocated as tensor
- parent_tensor = tensors[parent];
- }
- else if (subtensors.find(parent) != subtensors.end())
- {
- // Parent is allocated as subtensor
- parent_tensor = subtensors[parent];
- }
- else
- {
- // Cannot find allocated parent tensor: allocate parent first
- assert(_subtensor_info_map.find(parent) != _subtensor_info_map.end());
- stack.push(parent);
- continue;
- }
- assert(parent_tensor != nullptr);
-
- // Child's type should be same with parent
- assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
- assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
- assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
- // TODO Change to set data_layout for each front-end
- auto shape = asTensorShape(info.shape(), _layout, _apply_dim_correction_map[current]);
- ::arm_compute::Coordinates coordinates = asTensorCoordinate(info.offset(), _layout);
- auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates, true);
- subtensors[current] = tensor;
- stack.pop();
- }
- }
+ // Order matters: buildSubtensors() looks up each subtensor's parent among the
+ // plain tensors created by buildTensors(), so tensors must be built first.
+ buildTensors();
+ buildSubtensors();
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
return std::move(_mem_mgr);
}
+// Build a backend tensor for every operand registered in _tensor_info_map.
+// Converts each model::OperandInfo into backend tensor info and delegates the
+// actual tensor creation to the memory manager. Precondition (asserted): no
+// tensor has been built yet.
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildTensors(void)
+{
+ assert(_mem_mgr->tensors().size() == 0);
+
+ for (auto &entry : _tensor_info_map)
+ {
+ auto ind = entry.first;
+ const auto &info = entry.second;
+ // NOTE(review): operator[] default-inserts `false` for operands that were
+ // never passed to dimCorrection() — confirm that is the intended default.
+ auto tensor_info =
+ asTensorInfo(info.shape(), info.typeInfo(), _layout, _apply_dim_correction_map[ind]);
+ _mem_mgr->buildTensor(ind, tensor_info);
+ }
+}
+
+// Build a SubTensor for every entry in _subtensor_info_map. A parent may be a
+// plain tensor (from buildTensors()) or itself a subtensor, so an explicit
+// stack is used to create parents before their children; the traversal
+// algorithm is described step by step below. Precondition (asserted): no
+// subtensor has been built yet.
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::buildSubtensors(void)
+{
+ // TODO Handle SubTensor(subsumption)
+ // Currently this TemplTensorBuilder does not have subsumption info yet
+ // Allocated subtensor will be mapped to _subtensors instead of _tensors
+ assert(_mem_mgr->subtensors().size() == 0);
+
+ // To make subtensor, parent tensor must be made first
+ // For this condition, use stack
+ // 1) Push one subtensor index to stack (iterate subtensors)
+ // 2) If tensor at stack top is already made, pop and go to 4)
+ // 3) If tensor pushed at 1) is not made, check parent tensor
+ // 3-1) If parent tensor is already made, we can make child tensor
+ // Make child tensor and pop, go to 4)
+ // 3-2) If parent tensor is not made, we can't make child tensor yet
+ // Push parent tensor index to stack and return to 4)
+ // 4) If stack is empty, return to 1), else return to 2)
+ auto &tensors = _mem_mgr->tensors();
+ auto &subtensors = _mem_mgr->subtensors();
+ for (auto &entry : _subtensor_info_map)
+ {
+ model::OperandIndex ind = entry.first;
+
+ std::stack<model::OperandIndex> stack;
+ stack.push(ind);
+
+ while (!stack.empty())
+ {
+ const auto current = stack.top();
+ const auto &info = _subtensor_info_map.at(current);
+
+ // Already generated SubTensor
+ if (subtensors.find(current) != subtensors.end())
+ {
+ stack.pop();
+ continue;
+ }
+
+ auto parent = info.parent();
+ std::shared_ptr<T_ITensor> parent_tensor;
+
+ if (tensors.find(parent) != tensors.end())
+ {
+ // Parent is allocated as tensor
+ parent_tensor = tensors[parent];
+ }
+ else if (subtensors.find(parent) != subtensors.end())
+ {
+ // Parent is allocated as subtensor
+ parent_tensor = subtensors[parent];
+ }
+ else
+ {
+ // Cannot find allocated parent tensor: allocate parent first
+ assert(_subtensor_info_map.find(parent) != _subtensor_info_map.end());
+ stack.push(parent);
+ continue;
+ }
+ assert(parent_tensor != nullptr);
+
+ // Child's type should be same with parent
+ assert(info.type().offset() == parent_tensor->info()->quantization_info().offset);
+ assert(info.type().scale() == parent_tensor->info()->quantization_info().scale);
+ assert(asDataType(info.type().type()) == parent_tensor->info()->data_type());
+ // TODO Change to set data_layout for each front-end
+ auto shape = asTensorShape(info.shape(), _layout, _apply_dim_correction_map[current]);
+ ::arm_compute::Coordinates coordinates = asTensorCoordinate(info.offset(), _layout);
+ // NOTE(review): SubTensor holds a raw pointer to its parent — parent's
+ // lifetime must cover the subtensor's; presumably both are owned by
+ // _mem_mgr. Confirm against the memory manager's ownership model.
+ auto tensor = std::make_shared<T_SubTensor>(parent_tensor.get(), shape, coordinates, true);
+ subtensors[current] = tensor;
+ stack.pop();
+ }
+ }
+}
+
} // namespace acl_common
} // namespace backend
} // namespace neurun