Allocator::Allocator(uint32_t capacity)
{
- assert(!_base && capacity != 0);
-
_base = nnfw::cpp14::make_unique<uint8_t[]>(capacity);
VERBOSE(ALLOC) << "allocation capacity: " << capacity << std::endl;
#include "util/logging.h"
#include "backend/Backend.h"
#include "backend/BackendManager.h"
+#include "backend/ITensorBuilder.h"
#include "model/OperationIndexMap.h"
namespace neurun
return _context_manager.at(backend).get();
}
+ // Collect the tensor builders from every registered backend context.
+ // Walks _context_manager and gathers each context's tensor_builder into a
+ // TensorBuilderSet; duplicates collapse via set semantics.
+ // NOTE(review): assumes every entry's tensor_builder is non-null — confirm
+ // against BackendContext construction.
+ backend::TensorBuilderSet tensor_builders() const
+ {
+ backend::TensorBuilderSet ret;
+ for (const auto &e : _context_manager)
+ {
+ ret.insert(e.second->tensor_builder);
+ }
+ return ret;
+ }
+
const backend::Backend *getBackend(const model::OperationIndex &index) const
{
return getBackendContext(index)->backend;
shape_fixer->fix(*element.subgraph);
});
- auto tensor_builders = linear->planTensors();
+ linear->planTensors();
+
+ auto tensor_builders = linear->backend_resolver()->tensor_builders();
// Prepare tensors
for (auto &tensor_builder : tensor_builders)
shape_fixer->fix(subg);
});
- backend::TensorBuilderSet tensor_builders;
-
graph.operands().iterate([&](const model::OperandIndex &ind, const model::Operand &obj) {
const auto lower_info = graph.getLowerInfo(ind);
for (auto factor : lower_info->def_factors())
// To make this never be deallocated, this is a workaround to use static memory planner
tensor_builder->notifyFirstUse(ind);
}
-
- tensor_builders.insert(tensor_builder);
}
});
+ auto tensor_builders = graph.backend_resolver()->tensor_builders();
+
for (auto &tensor_builder : tensor_builders)
{
tensor_builder->prepare();
}
}
-backend::TensorBuilderSet Linear::planTensors()
+void Linear::planTensors()
{
- backend::TensorBuilderSet tensor_builders;
model::OperandIndexMap<std::shared_ptr<backend::ITensorBuilder>> tensor_builder_map;
// NOTE
tensor_builder->registerTensorInfo(ind, info, layout);
}
- // Prepare tensor builders to be returned
- tensor_builders.insert(tensor_builder);
-
tensor_builder_map[ind] = tensor_builder;
}
});
assert(std::all_of(
def_map.begin(), def_map.end(),
[](std::pair<const model::OperandIndex, uint32_t> it) { return it.second == 0; }));
-
- // Set subtensor information
- // Todo: move this phase outside as optimization phase
- return tensor_builders;
}
void Linear::iterate(const std::function<void(const Element &element)> &fn) const
public:
void accept(model::OperationVisitor &&visitor) const;
- // TODO Should not return TensorBuilderSet
- backend::TensorBuilderSet planTensors();
+ void planTensors();
void iterate(const std::function<void(const Element &element)> &fn) const;
return _backend_resolver->getBackendContext(backend);
}
+ // Non-owning read-only accessor to the backend resolver; the pointer's
+ // lifetime is tied to this object's _backend_resolver member.
+ const compiler::BackendResolver *backend_resolver() const { return _backend_resolver.get(); }
+
private:
// TODO Replace these getLowerInfo methods with ones of LowerInfoMap in the future
const graph::operation::LowerInfo *getLowerInfo(const model::SubgraphIndex &index) const;