*/
#include "passes/soft_backend/CPPGenerator.h"
+
+#include "core/modelIR/Operation.h"
#include "ModelAnalyzer.h"
#include "SBSerializer.h"
#include "option/Options.h"
string formattedName;
if(td._name.empty())
{
- assert(td._type == TensorType::ORDINARY);
+ assert(td._type == TensorType::TEMPORARY);
formattedName = "Tensor_" + to_string(tmpTensors++);
}
else
{
- if (td._type != TensorType::ORDINARY)
+ if (td._type != TensorType::TEMPORARY)
{
formattedName.append("_");
}
{
out << " std::shared_ptr<Tensor> getOutput();\n";
}
- for (const size_t outId: ma.getNamedTensors())
+ for (const size_t outId: ma.getPersistentTensors())
{
const string &tName = _formattedTensors[outId];
out << " std::shared_ptr<Tensor> get" << tName << "();\n";
const string &tName = _formattedTensors[inId];
out << " Tensor " << tName << ";\n";
}
- for (const size_t outId: ma.getNamedTensors())
+ for (const size_t outId: ma.getPersistentTensors())
{
const string &tName = _formattedTensors[outId];
out << " std::shared_ptr<Tensor> " << tName << ";\n";
for (size_t id: op._outputs)
{
const TensorDescription &td = ma.getTensors()[id];
- assert(td._type != TensorType::IN && "no input nodes should be inserted into inference sequence");
- if (td._type == TensorType::OUT)
+ assert(td._type != TensorType::INPUT && "no input nodes should be inserted into inference sequence");
+ if (td._type == TensorType::PERSISTENT)
continue;
const string &tName = formatted[id];
out << " Tensor " << tName << ";\n";
for (size_t id: argIds)
{
const string &tensorName = _formattedTensors[id];
- if (ma.getTensors()[id]._type == TensorDescription::Type::OUT)
+ if (ma.getTensors()[id]._type == TensorDescription::Type::PERSISTENT)
{
args.push_back("*" + tensorName);
}
using OpDescr = OpDescr;
for (const OpDescr &op: ma.getInferenceSequence())
{
- using Type = OpDescr::Type;
- if (op._type == Type::IN)
+ if (op._op->getType() == mir::Operation::Type::variable)
continue;
// create temporary tensors
printTmpTensors(out, ma, _formattedTensors, op);
const TensorDescription &td = tensors[outputs[0]];
printGetter(out, className, "Output", td);
}
- for (size_t outId: ma.getNamedTensors())
+ for (size_t outId: ma.getPersistentTensors())
{
const string &outName = _formattedTensors[outId];
const TensorDescription &td = tensors[outId];
}
out << "void " << className << "::doInference()\n"
"{\n";
- for (size_t outId: ma.getNamedTensors())
+ for (size_t outId: ma.getPersistentTensors())
{
const string &outName = _formattedTensors[outId];
out << " " << outName << ".reset(new Tensor());\n";
* limitations under the License.
*/
-#include <type_traits>
-#include <limits>
-#include <stack>
-#include <set>
-
#include "ModelAnalyzer.h"
+
#include "core/modelIR/Shape.h"
#include "core/modelIR/ShapeRange.h"
-
#include "core/modelIR/Graph.h"
+
#include "core/modelIR/operations/BatchNormOp.h"
#include "core/modelIR/operations/BiasAddOp.h"
#include "core/modelIR/operations/CappedReluOp.h"
#include "core/modelIR/operations/TransposeOp.h"
#include "core/modelIR/operations/VariableOp.h"
+#include <type_traits>
+#include <limits>
+#include <stack>
+#include <set>
+
using namespace std;
namespace nnc
using namespace nnc::mir;
-void ModelAnalyzer::addOpDescr(Operation* op, const string& opName) {
-  OpDescr::Type type = OpDescr::Type::ORDINARY;
-  vector<size_t> nodeOutputs;
-  const std::string &name = op->getName();
-  size_t nodeTid = INVALID_TENSOR_ID;
-  if (op->getPrevNodes().empty()) {
-    if (auto* p2const = dynamic_cast<ops::ConstantOp*>(op)) {
-      type = OpDescr::Type::ORDINARY;
-
-      auto* shape = const_cast<Shape*> (&p2const->getOutputShape(0));
-      /*
-       * FIXME allocateTensor get const Shape
-       */
-      nodeTid = allocateTensor(name, TensorDescription::Type::ORDINARY, shape);
-    } else {
-      // process input op
-      assert(op->getType() == Operation::Type::variable);
-      Shape inputShape = op->getOutputShape(0);
-      nodeTid = allocateTensor(name, TensorDescription::Type::IN, &inputShape);
-      type = OpDescr::Type::IN;
-      _inputs.push_back(nodeTid);
-    }
-  } else if (!name.empty()) {
+void ModelAnalyzer::addOpDescr(Operation* op, const string& function_name) {
+  vector<size_t> node_output_tensors;
+  const string &op_name = op->getName();
+
+  // process operation outputs
+  size_t node_output_tensor_id = INVALID_TENSOR_ID;
+  if (op->getType() == Operation::Type::variable) {
+    // register input tensor
+    node_output_tensor_id = declareInputTensor(op_name, op->getOutputShape(0));
+  } else if (op->getType() == Operation::Type::constant) {
+    // register constant tensor
+    // its data is deserialized into the described tensor in O(1) at runtime
+    node_output_tensor_id = declareTemporaryTensor();
+  } else if (!op_name.empty() || op->getNextNodes().empty()) {
+    // persistent if explicitly named, or a graph output (no consumers)
    // process output op
-    nodeTid = allocateTensor(name, TensorDescription::Type::OUT);
-    _named_tensors.push_back(nodeTid);
-    type = OpDescr::Type::OUT;
+    node_output_tensor_id = declarePersistentTensor(op_name);
  } else {
-    // process ordinary op
-    nodeTid = allocateTensor();
+    // process ordinary unnamed operation
+    node_output_tensor_id = declareTemporaryTensor();
  }
-  assert(nodeTid != INVALID_TENSOR_ID);
-  nodeOutputs.push_back(nodeTid);
-  // process op outputs
-  // consider op as output if it has no consumers
-  if (op->getNextNodes().empty() && (type == OpDescr::Type::OUT))
-    _outputs.push_back(nodeTid);
-  // process op inputs
-  vector<size_t> nodeInputs;
+  assert(node_output_tensor_id != INVALID_TENSOR_ID);
+  node_output_tensors.push_back(node_output_tensor_id);
+
+  // process operation inputs
+  vector<size_t> node_input_tensors;
  for (const IODescriptor &d: op->getPrevNodes()) {
    size_t idx = d.index;
    Operation *op = d.op;
+    // NOTE(review): loop-local 'op' shadows the function parameter 'op' — consider renaming
    assert(_opToDescr.find(op) != _opToDescr.end());
    const OpDescr &descr = *_opToDescr[op];
    const size_t &inTid = descr._outputs[idx];
-    nodeInputs.push_back(inTid);
+    node_input_tensors.push_back(inTid);
  }
-  _inferenceSequence.push_back({type, op, opName,
-                               std::move(nodeInputs),
-                               std::move(nodeOutputs),
+
+  // record the call; _opToDescr lets later operations bind their inputs to these outputs
+  _inferenceSequence.push_back({op, function_name,
+                               std::move(node_input_tensors),
+                               std::move(node_output_tensors),
                               0});
  _opToDescr[op] = &_inferenceSequence.back();
}
-size_t ModelAnalyzer::allocateTensor(const string &name, TensorDescription::Type type, Shape *shape)
-{
-  assert(!(name.empty() && type != TensorDescription::Type::ORDINARY) && "Input or output tensor must have name");
+size_t ModelAnalyzer::declareInputTensor(const std::string& name, const mir::Shape& shape) {
+  // inputs are set from outside the artifact, so they must have a user-visible name
+  assert(!name.empty() && "Input tensor must have name");
  size_t id = _allocatedTensors++;
-  if (shape == nullptr)
-  {
+  _tensors.push_back({id, TensorDescription::Type::INPUT, name, shape});
+  _inputs.push_back(id);
+  return id;
+}
+
+// Registers a tensor that survives inference (e.g. an NN output).
+size_t ModelAnalyzer::declarePersistentTensor(const std::string& name) {
+  size_t id = _allocatedTensors++;
+  auto type = TensorDescription::Type::PERSISTENT;
+  if (name.empty()) {
+    // special case for unnamed output tensors: synthesize a unique name from the id
+    _tensors.push_back({id, type, "unnamed_output" + to_string(id), {}});
+  } else {
    _tensors.push_back({id, type, name, {}});
  }
-  else
-  {
-    _tensors.push_back({id, type, name, *shape});
-  }
-  assert(_tensors.size() == _allocatedTensors);
+  _persistent_tensors.push_back(id);
+  return id;
+}
+
+// Registers an internal tensor; it stays unnamed here — the code generator
+// assigns it a formatted name ("Tensor_<n>") during emission.
+size_t ModelAnalyzer::declareTemporaryTensor() {
+  size_t id = _allocatedTensors++;
+  _tensors.push_back({id, TensorDescription::Type::TEMPORARY, "", {}});
  return id;
}
Operation* node = *it;
node->accept(this);
}
+
+ for (Operation* out_op: g->collectOutputs()) {
+ OpDescr* descr = _opToDescr[out_op];
+ _outputs.insert(_outputs.end(), descr->_outputs.begin(), descr->_outputs.end());
+ }
}
void ModelAnalyzer::visit(ops::ConcatOp& op) {
* This variable can store inputs, outputs of network and temporary data.
*/
struct TensorDescription {
+ /**
+   * INPUT tensors are supposed to be set from outside of the artifact
+   * PERSISTENT tensors keep their data after the inference process is over; this includes NN outputs
+   * TEMPORARY tensors are not accessible outside the artifact in any way;
+   *           they are created and destroyed on demand
+ */
enum class Type {
- IN,
- OUT,
- ORDINARY
+ INPUT,
+ PERSISTENT,
+ TEMPORARY
};
size_t _id;
Type _type;
* @brief OpDescr represents operation call in inference sequence
*/
struct OpDescr {
- enum class Type {
- IN,
- OUT,
- ORDINARY
- };
-
- Type _type;
mir::Operation* _op;
std::string _opName;
// list of input tensors
/**
   * @return vector of ids of persistent tensors (tensors named in Model IR, plus auto-named NN outputs)
*/
- const std::vector<size_t>& getNamedTensors() const {
- return _named_tensors;
+ const std::vector<size_t>& getPersistentTensors() const {
+ return _persistent_tensors;
}
/**
/**
* @brief Common function to add function call in inference sequence
* @param op Node representing added call
- * @param name Function name
+ * @param function_name Function name
*
* Inserts information about CG operation into inference sequence: name of operation,
* creates tensors for operation outputs, binds operation inputs with tensors from previous operations
*/
- void addOpDescr(mir::Operation* op, const std::string& name);
+ void addOpDescr(mir::Operation* op, const std::string& function_name);
/**
- * @brief Creates variable in artifact
- * @param name Name of variable
- * @param isNNInput If true this variable can be set by "set" method of artifact
- * @param isNNOutput If true this variable can be gathered by user by "get" method of artifact
- * @return Id of created variable
+ * @brief Declares input tensor in artifact
+ * @param name Name of tensor
+ * @param shape expected shape of input
+ * @return Id of created tensor
*/
- size_t allocateTensor(const std::string& name = std::string(),
- TensorDescription::Type type = TensorDescription::Type::ORDINARY,
- mir::Shape* shape = nullptr);
+ size_t declareInputTensor(const std::string& name, const mir::Shape& shape);
+
+ /**
+ * @brief Declares persistent tensor in artifact
+   * @param name Name of variable; if empty, a name is assigned automatically
+ * @return Id of created tensor
+ */
+ size_t declarePersistentTensor(const std::string& name);
+
+ /**
+ * @brief Declares temporary tensor in artifact
+ * @return Id of created tensor
+ */
+ size_t declareTemporaryTensor();
std::string _modelName = "NN";
std::list<OpDescr> _inferenceSequence;
size_t _allocatedTensors = 0;
+
+ /// @brief list of artifact inputs
std::vector<size_t> _inputs;
- std::vector<size_t> _named_tensors;
+ /// @brief list of persistent tensors
+ std::vector<size_t> _persistent_tensors;
+ /// @brief list of tensor ids corresponding to NN outputs
std::vector<size_t> _outputs;
std::vector<TensorDescription> _tensors;
std::map<const mir::Operation*, OpDescr*> _opToDescr;