#define __NEURUN_BACKEND_ACL_COMMON_TEMPL_TENSOR_BUILDER_H__
#include <memory>
+#include <queue>
#include <arm_compute/core/Types.h>
#include <backend/ITensorBuilder.h>
namespace acl_common
{
+// Tag for entries in the use/def event queue: whether the queued event marks
+// the FIRST use (start of lifetime) or the LAST use (end of lifetime) of an
+// operand in the linearized execution order.
+enum class UsesType
+{
+ FIRST,
+ LAST
+};
+
// Tensor builder shared by the ACL backends; the template parameters select
// the backend-specific arm_compute tensor/object types.
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
class TemplTensorBuilder : public ITensorBuilder
{
// Wrap the backend tensor registered under `ind` into an IObject handle.
std::shared_ptr<backend::operand::IObject> wrapTensor(const model::OperandIndex &ind) override;
void iterate(const IterateFunction &fn) override;
- // TODO Fill these and remember that these will be for the linear executor for a while
- void preVisit(const model::Operation &) override {}
- void postVisit(const model::Operation &) override {}
+ // Lifetime hooks called around each operation during execution planning;
+ // they only take effect for the "Linear" executor (see their definitions).
+ void preVisit(const model::Operation &node) override;
+ void postVisit(const model::Operation &node) override;
// TODO Consider removing after #5642 fixes
void registerModelObject(const model::OperandIndex &ind, const model::Operand &obj) override;
private:
void buildTensors(void);
void buildSubtensors(void);
+ // Debug-only (assert) check that all use/def bookkeeping was fully drained.
+ void validate(void);
private:
model::OperandIndexMap<model::OperandInfo> _tensor_info_map;
model::OperandIndexMap<bool> _apply_dim_correction_map;
model::Layout _layout;
std::unique_ptr<T_AclMemoryManager> _mem_mgr;
+
+ // TODO Consider dividing TensorBuilder into Linear and others
+ // Executor name from config; lifetime tracking below runs only for "Linear".
+ const std::string _executor_str;
+
+ // for linear executor
+ // FIFO of FIRST/LAST use events in linearized order, the count of FIRST
+ // events still pending in the queue, and a per-operand "FIRST seen" flag.
+ std::queue<std::pair<UsesType, model::OperandIndex>> _uses_queue;
+ uint32_t _first_uses_num;
+ model::OperandIndexMap<bool> _first_uses_visit;
+
+ // for subtensors
+ // Per-parent flags/counters so a parent tensor's lifetime starts once and
+ // ends only after the parent and all of its subtensors are done.
+ model::OperandIndexMap<uint32_t> _parent_def;
+ model::OperandIndexMap<uint32_t> _parent_uses;
};
} // namespace acl_common
// Takes ownership of `mem_mgr` (stored in the unique_ptr member) and caches
// the configured executor name so the lifetime hooks can detect "Linear".
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::TemplTensorBuilder(
T_AclMemoryManager *mem_mgr)
- : _mem_mgr{mem_mgr}
+ : _mem_mgr{mem_mgr}, _executor_str(util::getConfigString(util::config::EXECUTOR)),
+ _first_uses_num(0)
{
assert(_mem_mgr);
}
_tensor_info_map.insert({ind, info});
_apply_dim_correction_map.insert({ind, true});
_layout = layout;
+
+ assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
+ _first_uses_visit[ind] = false;
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
_subtensor_info_map.insert({ind, info});
_apply_dim_correction_map.insert({ind, true});
+
+ assert(_first_uses_visit.find(ind) == _first_uses_visit.end());
+ _first_uses_visit[ind] = false;
+
+ const auto &parent_ind = info.parent();
+
+ // parent_def
+ _parent_def[parent_ind] = 1;
+
+ // parent_use
+ if (_parent_uses.find(parent_ind) == _parent_uses.end())
+ _parent_uses[parent_ind] = 1; // 1 means including parent it-self
+ _parent_uses[parent_ind]++;
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::notifyFirstUse(
- const model::OperandIndex &)
+ const model::OperandIndex &ind)
{
- // TODO FILL THIS
+ // Enqueue a FIRST-use event for `ind`; preVisit() pops it to start the
+ // operand's lifetime. _first_uses_num counts FIRST events still queued.
+ _first_uses_num++;
+ _uses_queue.emplace(UsesType::FIRST, ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::notifyLastUse(
- const model::OperandIndex &)
+ const model::OperandIndex &ind)
{
- // TODO FILL THIS
+ // Enqueue a LAST-use event for `ind`; postVisit() pops it to finish the
+ // operand's lifetime after the consuming operation has been visited.
+ _uses_queue.emplace(UsesType::LAST, ind);
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::allocate(void)
{
- assert(_tensor_info_map.size() == _mem_mgr->tensors().size())
+ // Debug-only: confirm all lifetime bookkeeping drained before committing.
+ validate();
+ assert(_tensor_info_map.size() == _mem_mgr->tensors().size());
_mem_mgr->allocate();
}
}
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+// Called before an operation executes (Linear executor only): pop pending
+// FIRST-use events from the queue and start the corresponding operand
+// lifetimes until all of this node's outputs have had their FIRST use seen.
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::preVisit(
+ const model::Operation &node)
+{
+ // For now others executor doesn't need this step
+ if (_executor_str != "Linear")
+ {
+ return;
+ }
+
+ // Start the lifetime of `ind`. For a subtensor (or a parent of subtensors)
+ // the lifetime of the shared parent tensor is started exactly once, guarded
+ // by _parent_def; plain operands start their own lifetime directly.
+ auto start_lifetime = [this](const model::OperandIndex &ind) {
+ // a subtensor?
+ if (_subtensor_info_map.find(ind) != _subtensor_info_map.end())
+ {
+ const auto &parent_ind = _subtensor_info_map.at(ind).parent();
+ if (_parent_def[parent_ind])
+ {
+ _parent_def[parent_ind] = 0;
+ _mem_mgr->startLifetime(parent_ind);
+ }
+ }
+ // a parent?
+ else if (_parent_def.find(ind) != _parent_def.end())
+ {
+ if (_parent_def[ind])
+ {
+ _parent_def[ind] = 0;
+ _mem_mgr->startLifetime(ind);
+ }
+ }
+ else
+ {
+ _mem_mgr->startLifetime(ind);
+ }
+ };
+
+ // Snapshot the "FIRST already seen" flag for each output of this node.
+ model::OperandIndexMap<bool> outputs_map;
+ for (const auto &ind : node.getOutputs())
+ {
+ assert(_first_uses_visit.find(ind) != _first_uses_visit.end());
+ outputs_map[ind] = _first_uses_visit[ind];
+ }
+
+ // outputs_map's all elements are true?
+ auto outputs_map_all_check = [&outputs_map]() {
+ return std::all_of(outputs_map.begin(), outputs_map.end(),
+ [](std::pair<const model::OperandIndex, bool> it) { return it.second; });
+ };
+
+ // NOTE(review): this relies on the invariant that while some output is
+ // unvisited, a FIRST event is still queued — front() on an empty queue
+ // would be UB. TODO confirm the planner always enqueues outputs' FIRSTs.
+ std::pair<UsesType, model::OperandIndex> peak;
+ while (!outputs_map_all_check() && (peak = _uses_queue.front()).first == UsesType::FIRST)
+ {
+ _uses_queue.pop();
+ _first_uses_num--;
+
+ const auto &popped_idx = peak.second;
+ start_lifetime(popped_idx);
+
+ outputs_map[popped_idx] = true;
+ _first_uses_visit[popped_idx] = true;
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+// Called after an operation executes (Linear executor only): pop LAST-use
+// events for this node's inputs and finish the corresponding lifetimes.
+// Once every FIRST event has been consumed (_first_uses_num == 0), drain the
+// remaining LAST events — typically the model outputs.
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::postVisit(
+ const model::Operation &node)
+{
+ // For now others executor doesn't need this step
+ if (_executor_str != "Linear")
+ {
+ return;
+ }
+
+ // Finish the lifetime of `ind`. For subtensors/parents the shared parent
+ // tensor is finished only when its use count (parent itself + all
+ // subtensors) drops to zero; plain operands are finished directly.
+ auto finish_lifetime = [this](const model::OperandIndex &ind) {
+ // a subtensor?
+ if (_subtensor_info_map.find(ind) != _subtensor_info_map.end())
+ {
+ const auto &parent_ind = _subtensor_info_map.at(ind).parent();
+ if (--(_parent_uses[parent_ind]) == 0)
+ {
+ _mem_mgr->finishLifetime(parent_ind);
+ }
+ }
+ // a parent?
+ else if (_parent_uses.find(ind) != _parent_uses.end())
+ {
+ if (--(_parent_uses[ind]) == 0)
+ {
+ _mem_mgr->finishLifetime(ind);
+ }
+ }
+ else
+ {
+ _mem_mgr->finishLifetime(ind);
+ }
+ };
+
+ const auto &inputs = node.getInputs();
+ std::pair<UsesType, model::OperandIndex> peak;
+ // Guard against calling front() on an empty queue: after the final LAST
+ // entry is popped the condition is re-evaluated, and std::queue::front()
+ // on an empty queue is undefined behavior.
+ while (!_uses_queue.empty() && (peak = _uses_queue.front()).first == UsesType::LAST)
+ {
+ const auto &popped_idx = peak.second;
+ if (inputs.contains(popped_idx))
+ {
+ _uses_queue.pop();
+ finish_lifetime(popped_idx);
+ }
+ else
+ {
+ break;
+ }
+ }
+
+ // All FIRST events consumed: the remaining queued events must all be LAST
+ // uses (e.g. model outputs); drain them now.
+ if (_first_uses_num == 0)
+ {
+ while (!_uses_queue.empty())
+ {
+ peak = _uses_queue.front();
+ assert(peak.first == UsesType::LAST);
+
+ _uses_queue.pop();
+
+ finish_lifetime(peak.second);
+ }
+ }
+}
+
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
+// Debug-only sanity check (all checks are asserts): every registered tensor
+// and subtensor had its FIRST use visited, the event queue and pending-FIRST
+// counter are drained, and every parent def/use counter reached zero.
+void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::validate(void)
+{
+ // For now others executor doesn't need this step
+ if (_executor_str != "Linear")
+ {
+ return;
+ }
+
+ // Iterate by const reference to avoid copying each map entry.
+ for (const auto &it : _tensor_info_map)
+ {
+ assert(_first_uses_visit.find(it.first) != _first_uses_visit.end());
+ assert(_first_uses_visit[it.first]);
+ }
+
+ for (const auto &it : _subtensor_info_map)
+ {
+ assert(_first_uses_visit.find(it.first) != _first_uses_visit.end());
+ assert(_first_uses_visit[it.first]);
+ }
+
+ assert(_uses_queue.size() == 0);
+ assert(_first_uses_num == 0);
+
+ assert(std::all_of(
+ _parent_def.begin(), _parent_def.end(),
+ [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+ assert(std::all_of(
+ _parent_uses.begin(), _parent_uses.end(),
+ [](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
+}
+
+// TODO Consider removing after #5642 fixes
+template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::registerModelObject(
const model::OperandIndex &ind, const model::Operand &obj)
{
// Intentional no-op; cast both parameters to void so neither triggers an
// unused-parameter warning (the original silenced only `obj`).
(void)ind;
(void)obj;
}
+// TODO Consider removing after #5642 fixes
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::markConstant(
const model::OperandIndex &ind)
(void)ind;
}
+// TODO Consider removing after #5642 fixes
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
bool TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::isConstant(
const model::OperandIndex &ind)
return false;
}
+// TODO Consider removing after #5642 fixes
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::deallocateConstants()
{
// TODO Fill this
}
+// TODO Consider removing after #5642 fixes
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::notifyFirstUseIf(
const model::OperandIndex &ind)
(void)ind;
}
+// TODO Consider removing after #5642 fixes
template <typename T_ITensor, typename T_Tensor, typename T_SubTensor, typename T_Object>
void TemplTensorBuilder<T_ITensor, T_Tensor, T_SubTensor, T_Object>::notifyLastUseIf(
const model::OperandIndex &ind)