" " << className << "(const std::string ¶metersPath);\n"
" ~" << className << "();\n";
// generate input setters
+ if (ma.getInputs().size() == 1)
+ {
+ out << " void setInput(const Tensor &inputs);\n";
+ }
for (const size_t inId: ma.getInputs())
{
const string &tName = _formattedTensors[inId];
out << " void set" << tName << "(const Tensor& t);\n";
}
// generate output getters
- for (const size_t outId: ma.getOutputs())
+ if (ma.getOutputs().size() == 1)
+ {
+ out << " std::shared_ptr<Tensor> getOutput();\n";
+ }
+ for (const size_t outId: ma.getNamedTensors())
{
const string &tName = _formattedTensors[outId];
out << " std::shared_ptr<Tensor> get" << tName << "();\n";
const string &tName = _formattedTensors[inId];
out << " Tensor " << tName << ";\n";
}
- for (const size_t outId: ma.getOutputs())
+ for (const size_t outId: ma.getNamedTensors())
{
const string &tName = _formattedTensors[outId];
out << " std::shared_ptr<Tensor> " << tName << ";\n";
}
}
+// Emits the out-of-class C++ definition of one input setter:
+//   void <className>::set<setterName>(const Tensor& t) { <varName> = t; }
+// The generated setter copies its Tensor argument into the member <varName>.
+// @param out        stream receiving the generated source text
+// @param className  name of the generated model class
+// @param setterName suffix after "set" in the generated method name
+// @param varName    member variable the generated method assigns to
+void CPPCodeGenerator::printSetter(ostream &out, const string &className, const string &setterName, const string &varName)
+{
+ out << "void " << className << "::set" << setterName << "(const Tensor& t)\n"
+ "{\n"
+ " " << varName << " = t;\n"
+ "}\n\n";
+}
+
+// Emits the out-of-class C++ definition of one output getter:
+//   shared_ptr<Tensor> <className>::get<getterName>() { return <varName>; }
+// The generated getter returns the shared_ptr<Tensor> member <varName> by value.
+// @param out        stream receiving the generated source text
+// @param className  name of the generated model class
+// @param getterName suffix after "get" in the generated method name
+// @param varName    member variable the generated method returns
+void CPPCodeGenerator::printGetter(ostream &out, const string &className, const string &getterName, const string &varName)
+{
+ out << "shared_ptr<Tensor> " << className <<"::get" << getterName << "()\n"
+ "{\n"
+ " return " << varName << ";\n"
+ "}\n\n";
+}
+
// generate inference sequence
void CPPCodeGenerator::materializeInferenceSequence(ostream &out, const ModelAnalyzer &ma)
{
"{\n"
" releaseParameters(_parameters, _paramSize);\n"
"}\n\n";
- // gen input setters
- for (size_t inId: ma.getInputs())
+ // generate input setters
+ // generate main setter if network has only one
+ const auto &inputs = ma.getInputs();
+ if (inputs.size() == 1)
+ {
+ const string &inName = _formattedTensors[inputs[0]];
+ printSetter(out, className, "Input", inName);
+ }
+ // generate setters by names
+ for (size_t inId: inputs)
{
const string &inName = _formattedTensors[inId];
- out << "void " << className << "::set" << inName << "(const Tensor& t)\n"
- "{\n"
- " " << inName << " = t;\n"
- "}\n\n";
+ printSetter(out, className, inName, inName);
}
// gen output getters
- for (size_t outId: ma.getOutputs())
+ // generate main getter if network has only one
+ if (ma.getOutputs().size() == 1)
+ {
+ const string outName = _formattedTensors[ma.getOutputs()[0]];
+ printGetter(out, className, "Output", outName);
+ }
+ for (size_t outId: ma.getNamedTensors())
{
const string &outName = _formattedTensors[outId];
- out << "shared_ptr<Tensor> " << className <<"::get" << outName << "()\n"
- "{\n"
- " return " << outName << ";\n"
- "}\n\n";
+ printGetter(out, className, outName, outName);
}
out << "void " << className << "::doInference()\n"
"{\n";
- for (size_t outId: ma.getOutputs())
+ for (size_t outId: ma.getNamedTensors())
{
const string &outName = _formattedTensors[outId];
out << " " << outName << ".reset(new Tensor());\n";
#include "cpp_generator.h"
+#include "core/modelIR/operations/relu_op.h"
#include "support/PluginException.h"
using namespace std;
using namespace nncc::contrib;
using namespace nncc::contrib::backend::soft;
+using namespace nncc::contrib::core;
using namespace nncc::contrib::core::IR::model;
static bool isFileExists(const string &path)
clopt::CommandLine::getParser()->parseCommandLine(argc, argv, false);
nncc::contrib::core::IR::model::Graph g;
- g.create<ops::VariableOp>("input");
+ INode *input = g.create<ops::VariableOp>("input");
+ input->getOperation()->setOutputShape(0, data::Shape({1,2,3,4}));
+ INode *output = g.create<ops::ReluOp>("output");
+ output->connectInputTo(0, input->getOutput(0));
// test that generator creates output dir and files
if (isFileExists(TEST_DIR))