// TODO Handle SubTensor (subsumption)
// Currently this TensorBuilder does not have subsumption info yet
+ // Allocated subtensors will be mapped into _subtensors instead of _tensors
assert(_subtensor_info_map.size() == 0);
+ assert(_subtensors.size() == 0);
for (auto &entry : _tensor_info_map)
{
std::shared_ptr<::arm_compute::ITensor> TensorBuilder::tensorAt(const graph::operand::Index &ind)
{
- return _tensors.at(ind);
+ // Look in the plain-tensor map first, then fall back to subtensors
+ auto it = _tensors.find(ind);
+ if (it != _tensors.end())
+ {
+   return it->second;
+ }
+ return _subtensors.at(ind);
}
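Both tensorAt() and at() now perform the same two-map lookup. A minimal
sketch of factoring it into one helper follows; lookupTensor is a
hypothetical name, not part of this patch:

static std::shared_ptr<::arm_compute::ICLTensor>
lookupTensor(const graph::operand::Index &ind,
             const std::unordered_map<graph::operand::Index,
                                      std::shared_ptr<::arm_compute::CLTensor>> &tensors,
             const std::unordered_map<graph::operand::Index,
                                      std::shared_ptr<::arm_compute::CLSubTensor>> &subtensors)
{
  // Plain tensors take priority; fall back to subtensors
  auto it = tensors.find(ind);
  if (it != tensors.end())
  {
    return it->second;
  }
  return subtensors.at(ind); // throws std::out_of_range if absent in both maps
}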
std::shared_ptr<backend::operand::IObject>
TensorBuilder::wrapTensor(const graph::operand::Index &ind)
{
  // ... (body unchanged)
}
-std::shared_ptr<::arm_compute::CLTensor>
+std::shared_ptr<::arm_compute::ICLTensor>
TensorBuilder::at(const ::neurun::graph::operand::Index &ind)
{
- return _tensors.at(ind);
+ // Same two-map lookup as tensorAt(): plain tensors first, then subtensors
+ auto it = _tensors.find(ind);
+ if (it != _tensors.end())
+ {
+   return it->second;
+ }
+ return _subtensors.at(ind);
}
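Note that widening the return type from CLTensor to ICLTensor is what makes
the subtensor fallback possible here: in ARM Compute Library, CLSubTensor
derives from ICLTensor rather than from CLTensor, so ICLTensor is the
narrowest type that can refer to either kind of tensor.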
} // namespace acl_cl
#include <unordered_map>
#include <arm_compute/runtime/CL/CLTensor.h>
+#include <arm_compute/runtime/CL/CLSubTensor.h>
namespace neurun
{
virtual std::shared_ptr<backend::operand::IObject>
wrapTensor(const graph::operand::Index &ind) override;
virtual void iterate(const IterateFunction &fn) override;
- std::shared_ptr<::arm_compute::CLTensor> at(const ::neurun::graph::operand::Index &ind);
+ std::shared_ptr<::arm_compute::ICLTensor> at(const ::neurun::graph::operand::Index &ind);
private:
std::unordered_map<graph::operand::Index, ::arm_compute::TensorInfo> _tensor_info_map;
std::unordered_map<graph::operand::Index, backend::operand::SubTensorInfo> _subtensor_info_map;
std::unordered_map<graph::operand::Index, std::shared_ptr<::arm_compute::CLTensor>> _tensors;
+ std::unordered_map<graph::operand::Index, std::shared_ptr<::arm_compute::CLSubTensor>>
+ _subtensors;
};
} // namespace acl_cl
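For reference, a minimal sketch of how the TODO at the top of this patch
might eventually populate _subtensors, assuming SubTensorInfo exposes the
parent operand index, shape, and offset (the accessors parent(), shape(),
and offset() are assumptions, not the project's actual API):

// Sketch only: materialize each SubTensorInfo entry as a CLSubTensor
// backed by its parent CLTensor. Accessor names are hypothetical.
for (auto &entry : _subtensor_info_map)
{
  const auto &ind = entry.first;
  const auto &info = entry.second;
  auto parent = _tensors.at(info.parent()); // assumed parent operand index
  auto subtensor = std::make_shared<::arm_compute::CLSubTensor>(
      parent.get(), info.shape(), info.offset()); // assumed TensorShape / Coordinates
  _subtensors.emplace(ind, subtensor);
}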