using namespace std;
-namespace nnc
-{
+namespace nnc {
-namespace
-{
+namespace {
/**
- * @brief Creates pointer to some output stream to encapsulate resource management into deleter
- * @return Pointer output stream
- * @throws PluginException if did not succeed
+ * @brief Create a pointer to an output stream, encapsulating resource management in the deleter
+ * @return Pointer to the output stream
+ * @throws PassException if the file could not be opened
*/
-unique_ptr<ofstream> getStream(const string &path)
-{
+unique_ptr<ofstream> getStream(const string& path) {
unique_ptr<ofstream> ofs(new ofstream(path));
if (ofs->fail())
- {
- throw PassException("Can not open code output file: " + path);
- }
+ throw PassException("Cannot open code output file: " + path);
return ofs;
}
- * @param path Path to desired directory
- * @throws PluginException in did not succeed
+ * @param path Path to the desired directory
+ * @throws PassException if the directory could not be created
*/
-void createDir(const string &path)
-{
+void createDir(const string& path) {
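+ // mode 0775: read/write/execute for owner and group, read/execute for others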
int res = mkdir(path.c_str(), S_IRWXU | S_IRWXG | S_IROTH | S_IXOTH);
if (res != 0 && errno != EEXIST)
- {
throw PassException("Failed to create output directory");
- }
}
} // unnamed namespace
-BaseCodeGenerator::BaseCodeGenerator()
-{
- string basePath = cli::artifactDir + "/" + cli::artifactName;
- _headerPath = basePath + ".h";
- _codePath = basePath + ".cpp";
- _paramsPath = basePath + ".params";
+BaseCodeGenerator::BaseCodeGenerator() {
+ string base_path = cli::artifactDir + "/" + cli::artifactName;
+ _headerPath = base_path + ".h";
+ _codePath = base_path + ".cpp";
+ _paramsPath = base_path + ".params";
}
-void BaseCodeGenerator::materializeModelParams(ostream &out, const Serializer &s)
-{
+void BaseCodeGenerator::materializeModelParams(ostream& out, const Serializer& s) {
using namespace params;
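+ // Layout of the .params dump: a fixed-size header (magic, format version, model hash)
+ // followed by the raw byte buffer produced by the Serializer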
- // First form a dump header
+ // First, write the dump header
out.write(header, HEADER_LEN);
if (out.fail())
- {
throw PassException("Failed to write model parameters header");
- }
- auto &params = s.getBuffer();
+ auto& params = s.getBuffer();
out.write(params.data(), params.size());
if (out.fail())
- {
- throw PassException("Failed to write model Parameters");
- }
+ throw PassException("Failed to write model parameters");
}
-PassData BaseCodeGenerator::run(PassData data)
-{
- auto g = static_cast<mir::Graph *>(data);
+PassData BaseCodeGenerator::run(PassData data) {
+ auto g = static_cast<mir::Graph*>(data);
assert(g);
// visit and analyze graph
createDir(cli::artifactDir);
// Print header
- auto headerStream = getStream(_headerPath);
- materializeHeader(*headerStream, ma);
- headerStream.reset();
+ auto header_stream = getStream(_headerPath);
+ materializeHeader(*header_stream, ma);
+ header_stream.reset();
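+ // resetting the unique_ptr destroys the stream, which flushes and closes the file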
// Print code
- auto codeStream = getStream(_codePath);
- materializeCode(*codeStream, ma, serializer);
- codeStream.reset();
+ auto code_stream = getStream(_codePath);
+ materializeCode(*code_stream, ma, serializer);
+ code_stream.reset();
// Print model parameters
- auto modelStream = getStream(_paramsPath);
- materializeModelParams(*modelStream, serializer);
- modelStream.reset();
+ auto model_stream = getStream(_paramsPath);
+ materializeModelParams(*model_stream, serializer);
+ model_stream.reset();
return nullptr;
}
} // namespace nnc
-
* @param ma Intermediate artifact information
*/
void CPPCodeGenerator::formatTensorNames(const ModelAnalyzer& ma) {
- int tmpTensors = 0;
+ int tmp_tensors = 0;
for (const TensorDescriptor& td: ma.getTensors()) {
- string formattedName;
- if(td.name.empty()) {
+ string formatted_name;
+ if (td.name.empty()) {
assert(td.type == TensorType::temporary);
- formattedName = "Tensor_" + to_string(tmpTensors++);
+ formatted_name = "Tensor_" + to_string(tmp_tensors++);
} else {
if (td.type != TensorType::temporary)
- formattedName.append("_");
- formattedName.append(td.name);
- for (char& c: formattedName) {
+ formatted_name.append("_");
+ formatted_name.append(td.name);
+ for (char& c: formatted_name) {
if (!isalnum(c))
c = '_';
}
}
- _formattedTensors.push_back(move(formattedName));
+ _formattedTensors.push_back(move(formatted_name));
}
}
* and doInference method that performs actual inference.
*/
void CPPCodeGenerator::materializeHeader(ostream& out, const ModelAnalyzer& ma) {
- string className = ma.getModelName() + "Model";
+ string class_name = ma.getModelName() + "Model";
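+ // e.g. a model named "Net" produces "class NetModel" in the generated header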
out.write(cpp_header_types, sizeof(cpp_header_types));
- out << "class " << className << "\n"
+ out << "class " << class_name << "\n"
"{\n"
"public:\n"
- " " << className << "(const std::string& parametersPath);\n"
- " ~" << className << "();\n";
+ " " << class_name << "(const std::string& parametersPath);\n"
+ " ~" << class_name << "();\n";
// generate input setters
if (ma.getInputs().size() == 1)
out << " bool setInput(const Tensor& inputs);\n";
if (ma.getOutputs().size() == 1) {
out << " std::shared_ptr<Tensor> getOutput();\n";
}
- for (const size_t outId: ma.getPersistentTensors()) {
- const string& tName = _formattedTensors[outId];
- out << " std::shared_ptr<Tensor> get" << tName << "();\n";
+ for (const size_t out_id: ma.getPersistentTensors()) {
+ const string& tensor_name = _formattedTensors[out_id];
+ out << " std::shared_ptr<Tensor> get" << tensor_name << "();\n";
}
out << " void doInference();\n\n"
"private:\n"
- " " << className << "() = delete;\n"
- " " << className << "(const " << className << "& orig) = delete;\n"
- " " << className << "& operator=(const " << className << "& orig) = delete;\n";
+ " " << class_name << "() = delete;\n"
+ " " << class_name << "(const " << class_name << "& orig) = delete;\n"
+ " " << class_name << "& operator=(const " << class_name << "& orig) = delete;\n";
// generate input/output tensors
- for (const size_t inId: ma.getInputs()) {
- const string& tName = _formattedTensors[inId];
- out << " Tensor " << tName << ";\n";
+ for (const size_t in_tensor_id: ma.getInputs()) {
+ const string& tensor_name = _formattedTensors[in_tensor_id];
+ out << " Tensor " << tensor_name << ";\n";
}
- for (const size_t outId: ma.getPersistentTensors()) {
- const string& tName = _formattedTensors[outId];
- out << " std::shared_ptr<Tensor> " << tName << ";\n";
+ for (const size_t out_tensor_id: ma.getPersistentTensors()) {
+ const string& tensor_name = _formattedTensors[out_tensor_id];
+ out << " std::shared_ptr<Tensor> " << tensor_name << ";\n";
}
// pointer to NN parameters
* @param args arguments to print
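+ * e.g. args {"a", "b", "c"} are printed as "a, b, c"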
*/
static void printOperationArgs(ostream& out, const vector<string>& args) {
- bool insertComma = false;
+ bool insert_comma = false;
for (const string& arg: args) {
- if (insertComma)
+ if (insert_comma)
out << ", ";
- insertComma = true;
+ insert_comma = true;
out << arg;
}
}
void CPPCodeGenerator::gatherOperationArguments(const ModelAnalyzer& ma,
- const vector<size_t>& argIds,
+ const vector<size_t>& arg_ids,
vector<string>& args) {
- for (size_t id: argIds) {
- const string& tensorName = _formattedTensors[id];
+ for (size_t id: arg_ids) {
+ const string& tensor_name = _formattedTensors[id];
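+ // persistent tensors are generated as shared_ptr<Tensor> members (see materializeHeader),
+ // so the emitted argument dereferences them to pass a plain Tensor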
if (ma.getTensors()[id].type == TensorDescriptor::Type::persistent)
- args.push_back("*" + tensorName);
+ args.push_back("*" + tensor_name);
else
- args.push_back(tensorName);
+ args.push_back(tensor_name);
}
}
void CPPCodeGenerator::printSetter(ostream& out,
- const string& className,
- const string& setterName,
+ const string& class_name,
+ const string& setter_name,
const TensorDescriptor& td) {
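+ // Emits a setter of the form:
+ // bool <class_name>::set<setter_name>(const Tensor& t)
+ // { <shape checks>; <tensor> = t; return true; }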
- const string& varName = _formattedTensors[td.id];
- out << "bool " << className << "::set" << setterName << "(const Tensor& t)\n"
+ const string& var_name = _formattedTensors[td.id];
+ out << "bool " << class_name << "::set" << setter_name << "(const Tensor& t)\n"
"{\n";
// need to insert input correctness check
const mir::Shape expected = td.shape;
for (int i = 0; i < rank; ++i)
out << " " << "if (t.getShape()[" << i << "] != " << expected.dim(i) << ") return false;\n";
}
- out << " " << varName << " = t;\n"
+ out << " " << var_name << " = t;\n"
" return true;\n"
"}\n\n";
}
void CPPCodeGenerator::printGetter(ostream& out,
- const string& className,
- const string& getterName,
+ const string& class_name,
+ const string& getter_name,
const TensorDescriptor& td) {
- const string& varName = _formattedTensors[td.id];
- out << "shared_ptr<Tensor> " << className <<"::get" << getterName << "()\n"
+ const string& var_name = _formattedTensors[td.id];
+ out << "shared_ptr<Tensor> " << class_name <<"::get" << getter_name << "()\n"
"{\n"
- " return " << varName << ";\n"
+ " return " << var_name << ";\n"
"}\n\n";
}
return;
// materialize call
out << " " << call->funcName << "(";
- const auto& prevNodes = call->mirOp->getPrevNodes();
- const auto& outTensors = call->outputs;
+ const auto& prev_nodes = call->mirOp->getPrevNodes();
+ const auto& out_tensors = call->outputs;
vector<string> args;
- args.reserve(prevNodes.size() + outTensors.size() + 1);
+ args.reserve(prev_nodes.size() + out_tensors.size() + 1);
// gather output arguments
gatherOperationArguments(ma, call->outputs, args);
// parameters offset
* Function writes to output stream needed code snippets, and implementations of artifact class functions.
*/
void CPPCodeGenerator::materializeCode(ostream& out, const ModelAnalyzer& ma, const Serializer& s) {
- string className = ma.getModelName() + "Model";
+ string class_name = ma.getModelName() + "Model";
out << "#include \"" << cli::artifactName << ".h\"\n";
out.write(cpp_leaky_relu, sizeof(cpp_leaky_relu));
// gen NN constructor
- out << className << "::" << className << "(const string& parametersPath)\n"
+ out << class_name << "::" << class_name << "(const string& parametersPath)\n"
"{\n"
" readParameters(_parameters, _paramSize, parametersPath, "
<< s.getFormatVersion() << ", " << s.getModelHash() << ");\n"
"}\n\n";
// gen NN destructor
- out << className << "::~" << className << "()\n"
+ out << class_name << "::~" << class_name << "()\n"
"{\n"
" releaseParameters(_parameters, _paramSize);\n"
"}\n\n";
const auto& tensors = ma.getTensors();
if (inputs.size() == 1) {
const TensorDescriptor& td = tensors[inputs[0]];
- printSetter(out, className, "Input", td);
+ printSetter(out, class_name, "Input", td);
}
// generate setters by names
- for (size_t inId: inputs) {
- const string& inName = _formattedTensors[inId];
- const TensorDescriptor& td = tensors[inId];
- printSetter(out, className, inName, td);
+ for (size_t input_tensor_id: inputs) {
+ const string& input_tensor_name = _formattedTensors[input_tensor_id];
+ const TensorDescriptor& td = tensors[input_tensor_id];
+ printSetter(out, class_name, input_tensor_name, td);
}
// gen output getters
const auto& outputs = ma.getOutputs();
if (outputs.size() == 1) {
const TensorDescriptor& td = tensors[outputs[0]];
- printGetter(out, className, "Output", td);
+ printGetter(out, class_name, "Output", td);
}
- for (size_t outId: ma.getPersistentTensors()) {
- const string& outName = _formattedTensors[outId];
- const TensorDescriptor& td = tensors[outId];
- printGetter(out, className, outName, td);
+ for (size_t output_tensor_id: ma.getPersistentTensors()) {
+ const string& output_tensor_name = _formattedTensors[output_tensor_id];
+ const TensorDescriptor& td = tensors[output_tensor_id];
+ printGetter(out, class_name, output_tensor_name, td);
}
- out << "void " << className << "::doInference()\n"
+ out << "void " << class_name << "::doInference()\n"
"{\n";
- for (size_t outId: ma.getPersistentTensors()) {
- const string& outName = _formattedTensors[outId];
- out << " " << outName << ".reset(new Tensor());\n";
+ for (size_t output_tensor_id: ma.getPersistentTensors()) {
+ const string& output_tensor_name = _formattedTensors[output_tensor_id];
+ out << " " << output_tensor_name << ".reset(new Tensor());\n";
}
// gen inference sequence
* This file contains common constants and classes for code generator and artifact
*/
-namespace params
-{
+namespace params {
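+// sizes (in bytes) of the fields in the .params dump header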
const int MAGIC_LEN = 4;
const int VERSION_LEN = 4;
const int HASH_LEN = 4;
/**
* Type of Edge Handling for pooling operation
*/
-enum class PoolBorderType
-{
+enum class PoolBorderType {
- ZEROFILLED, // elements outside of input considered zero
- EMPTY // Consider that there are no elements outside of input shape
+ ZEROFILLED, // elements outside of the input are considered zero
+ EMPTY // there are no elements outside of the input shape
};
Operation* op, const string& function_name, std::vector<size_t> aux_args) {
vector<size_t> node_output_tensors;
- const string &op_name = op->getName();
+ const string& op_name = op->getName();
// process operation outputs
size_t node_output_tensor_id = INVALID_TENSOR_ID;
size_t idx = d.index;
Operation* prev_op = d.op;
assert(_opToDescr.find(prev_op) != _opToDescr.end());
- assert(dynamic_cast<const CallFunction*>(_opToDescr[prev_op]));
- const CallFunction* call = static_cast<const CallFunction*>(_opToDescr[prev_op]);
- const size_t &inTid = call->outputs[idx];
- node_input_tensors.push_back(inTid);
+ const CallFunction* call = dynamic_cast<const CallFunction*>(_opToDescr[prev_op]);
+ assert(call);
+ const size_t& in_tensor_id = call->outputs[idx];
+ node_input_tensors.push_back(in_tensor_id);
}
std::copy(aux_args.begin(), aux_args.end(), std::back_inserter(node_input_tensors));
for (size_t pos = 0; pos < post_order.size(); ++pos) {
const unique_ptr<Action>& action = post_order[pos];
- assert(dynamic_cast<CallFunction*>(action.get()));
- const CallFunction& call = *static_cast<CallFunction*>(action.get());
+ const CallFunction* call = dynamic_cast<CallFunction*>(action.get());
+ assert(call);
// update def info
- for (size_t output_tensor_id : call.outputs) {
+ for (size_t output_tensor_id : call->outputs) {
const TensorDescriptor& td = _tensors[output_tensor_id];
if (td.type != TensorDescriptor::Type::temporary)
continue;
}
// update usage info
- for (size_t input_tensor_id : call.inputs) {
+ for (size_t input_tensor_id : call->inputs) {
const TensorDescriptor& td = _tensors[input_tensor_id];
if (td.type != TensorDescriptor::Type::temporary)
continue;
gatherDefUseInfo(_inferenceSequence, first_def, last_use);
// insert memory operations
+ // Every iteration of the loop consists of three steps:
+ // 1) insert constructors for temporary tensors used by the current operation
+ // and not used earlier in the inference sequence
+ // 2) insert the operation call
+ // 3) insert destructors for temporary tensors not used after the current operation
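+ // For example, an inference sequence `t1 = op1(in); t2 = op2(t1); out = op3(t2)`
+ // with temporary tensors t1 and t2 becomes:
+ // create(t1); op1; create(t2); op2; destroy(t1); op3; destroy(t2)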
std::vector<unique_ptr<Action>> old_inference_seq;
old_inference_seq.swap(_inferenceSequence);
_inferenceSequence.reserve(old_inference_seq.size());
for (size_t pos = 0; pos < old_inference_seq.size(); ++pos) {
unique_ptr<Action>& action = old_inference_seq[pos];
- const CallFunction& call = *static_cast<CallFunction*>(action.get());
+ const CallFunction* call = dynamic_cast<CallFunction*>(action.get());
+ assert(call);
- // allocate required tensors
- for (size_t output_tensor_id : call.outputs) {
+ // construct required temporary tensors
+ for (size_t output_tensor_id : call->outputs) {
const TensorDescriptor& td = _tensors[output_tensor_id];
assert(td.id == output_tensor_id);
if (td.type != TensorDescriptor::Type::temporary)
// Insert operation call
_inferenceSequence.push_back(std::move(action));
- // remove unused tensors
- for (size_t input_tensor_id : call.inputs) {
+ // destroy unused temporary tensors
+ for (size_t input_tensor_id : call->inputs) {
const TensorDescriptor& td = _tensors[input_tensor_id];
assert(td.id == input_tensor_id);
if (td.type != TensorDescriptor::Type::temporary)
void ModelAnalyzer::collectOutputs(const mir::Graph* g) {
for (ops::OutputOp* out_op: g->getOutputs()) {
- assert(dynamic_cast<const CallFunction*>(_opToDescr[out_op]));
- auto op_call = static_cast<const CallFunction*>(_opToDescr[out_op]);
+ auto op_call = dynamic_cast<const CallFunction*>(_opToDescr[out_op]);
+ assert(op_call);
_outputs.insert(_outputs.end(), op_call->outputs.begin(), op_call->outputs.end());
}
}
Operation* node = top.first;
auto edge = top.second++;
auto next_nodes = node->getNextNodes();
- if (edge == next_nodes.size()){
+ if (edge == next_nodes.size()) {
// this node is fully analyzed, push it into RPO and pop from stack
post_order.push_back(node);
s.pop();
* it is easier to implement different types of pooling by different functions
*/
void ModelAnalyzer::visit(ops::PoolOp& op) {
- const char* funcName = nullptr;
+ const char* func_name = nullptr;
switch (op.getPoolingType()) {
case ops::PoolOp::PoolingType::MAX:
- funcName = "maxPool";
+ func_name = "maxPool";
break;
case ops::PoolOp::PoolingType::AVG:
- funcName = "avgPool";
+ func_name = "avgPool";
break;
default:
assert(false && "unsupported pooling type");
}
- appendOperationToInference(&op, funcName);
+ appendOperationToInference(&op, func_name);
}
void ModelAnalyzer::visit(ops::FullyConnectedOp& op) {
void ModelAnalyzer::visit(mir::ops::ElementwiseOp& op) {
const char* func_name = nullptr;
- switch ( op.getOpType() ) {
+ switch (op.getOpType()) {
case ops::ElementwiseOp::OpType::add:
func_name = "ElementWise<Add>";
break;
namespace nnc {
namespace mir {
- class Graph;
+class Graph;
}
/**
/**
- * @brief Convert enum to it's underlying type
+ * @brief Convert an enum to its underlying type
 * @tparam E Enum type
- * @param enumVal Value of enum
- * @return Integer value that correspond to enumVal
+ * @param enum_value Value of enum
+ * @return Integer value that corresponds to enum_value
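+ * e.g. given enum class E : int { A, B }; etoi(E::B) yields 1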
*/
-template <class E>
-typename underlying_type<E>::type etoi(E enumVal) {
- return static_cast<typename underlying_type<E>::type>(enumVal);
+template <typename E>
+typename underlying_type<E>::type etoi(E enum_value) {
+ return static_cast<typename underlying_type<E>::type>(enum_value);
}
void Serializer::serializeShape(const Shape& s) {
assert(etoi(t.getDataType()) < MAX_ENUM_VAL);
serializeT<int32_t>(etoi(t.getDataType()));
- // seriazlie data size
+ // serialize element size
- size_t eSize = t.getElementSize();
- assert(eSize <= MAX_DIMS);
- serializeT<int32_t>(eSize);
+ size_t element_size = t.getElementSize();
+ assert(element_size <= MAX_DIMS);
+ serializeT<int32_t>(element_size);
// serialize shape
const Shape& shape = t.getShape();
serializeShape(shape);
// serialize actual data
- size_t tSize = eSize * shape.numElements();
+ size_t data_size = element_size * shape.numElements();
- size_t oldSize = _buffer.size();
- _buffer.reserve(oldSize + tSize);
+ size_t old_serialized_data_size = _buffer.size();
+ _buffer.reserve(old_serialized_data_size + data_size);
for (const Index& idx: ShapeRange(shape)) {
- packData(t.at(idx), eSize);
+ packData(t.at(idx), element_size);
}
}
-template<class Op>
-void Serializer::serializePads(const Op& op, int32_t padsRank)
-{
- assert(padsRank <= MAX_DIMS);
- serializeT<int32_t>(padsRank);
- for (int i = 0; i < static_cast<int>(padsRank); ++i)
- {
+template <typename Op>
+void Serializer::serializePads(const Op& op, int32_t number_of_pads) {
+ assert(number_of_pads <= MAX_DIMS);
+ serializeT<int32_t>(number_of_pads);
+ for (int i = 0; i < static_cast<int>(number_of_pads); ++i) {
auto pad = op.getPaddingBefore().at(i);
assert(pad <= MAX_DIM_SIZE);
assert(pad >= 0);
- // serialize strindes
+ // serialize strides
serializeShape(op.getStrides());
// serialize pads
- int32_t padsRank = 2; // windowShape.rank();
- serializePads(op, padsRank);
+ int32_t number_of_pads = 2; // windowShape.rank();
+ serializePads(op, number_of_pads);
// serialize border type
- PoolBorderType borderType;
+ PoolBorderType border_type;
switch (op.getBorderType()) {
case ops::PoolOp::BorderType::EMPTY:
- borderType = PoolBorderType::EMPTY;
+ border_type = PoolBorderType::EMPTY;
break;
case ops::PoolOp::BorderType::ZEROFILLED:
- borderType = PoolBorderType::ZEROFILLED;
+ border_type = PoolBorderType::ZEROFILLED;
break;
default:
throw PassException("Unsupported border type in pooling");
}
- serializeT<int32_t>(etoi(borderType));
+ serializeT<int32_t>(etoi(border_type));
// serialize output shape
serializeShape(op.getOutputShape(0));
}
// serialize strides
serializeShape(op.getStrides());
// serialize pads
- int32_t padsRank = 2; // op.getInputShape(0).rank();
- serializePads(op, padsRank);
+ int32_t number_of_pads = 2; // op.getInputShape(0).rank();
+ serializePads(op, number_of_pads);
// serialize output shape
serializeShape(op.getOutputShape(0));
}
// serialize num dimensions
serializeT<int32_t>(op.getNumDim());
- for(int i = 0; i < num_dims; i++) {
+ for (int i = 0; i < num_dims; i++) {
std::pair<int32_t, int32_t> pair = op.getPaddingForDim(num_dims - 1 - i);
serializeT<int32_t>(pair.first);
serializeT<int32_t>(pair.second);
#include <vector>
#include <cstdint>
-namespace nnc
-{
+namespace nnc {
/**
* @brief Serializer of network parameters for soft backend
* To gather this vector use `getBuffer` method.
* Objects of this class are one-off and not designed to serialize more than one IR
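+ * Typical use: create a Serializer, call serialize() with the inference sequence,
+ * then read the result via getBuffer(), getFormatVersion() and getModelHash()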
*/
-class Serializer: public mir::IVisitor {
+class Serializer : public mir::IVisitor {
public:
void visit(mir::ops::BatchNormOp& op) override;
void serialize(std::vector<std::unique_ptr<sir::Action>>& inference_sequence);
- const std::vector<char> &getBuffer() const {
+ const std::vector<char>& getBuffer() const {
return _buffer;
}
uint32_t getModelHash() const {
return _modelHash;
}
+
private:
/**
* @brief Low level function to serialize untyped data buffer
- * @param padsRank Number of pads to serialize
+ * @param number_of_pads Number of pads to serialize
*/
- template <class Op>
+ template <typename Op>
- void serializePads(const Op& op, int32_t padsRank);
+ void serializePads(const Op& op, int32_t number_of_pads);
sir::CallFunction* _curOp;
const uint32_t _formatVersion = 1;
struct CallFunction : public Action {
CallFunction(mir::Operation* op, const std::string& func_name,
- std::vector<size_t>&& inputs, std::vector<size_t>&& outputs)
+ std::vector<size_t>&& inputs, std::vector<size_t>&& outputs)
: Action(Type::callFunction),
mirOp(op),
funcName(func_name),
* This test is not intended to check correctness of generated artifact
*/
-#include <iostream>
-#include <fstream>
-#include <string>
-
-#include <stdlib.h>
-
#include "support/CommandLine.h"
#include "option/Options.h"
#include "passes/soft_backend/CPPGenerator.h"
+#include <iostream>
+#include <fstream>
+#include <string>
+
+#include <stdlib.h>
+
// This header generated and contains array with test_main.def contents
#include "test_main.generated.h"
Operation* output_op = g.create<ops::OutputOp>("out", relu_op->getOutput(0));
}
-static void checkFileExists(const string &path)
-{
+static void checkFileExists(const string& path) {
ifstream f(path);
- if (!f.good())
- {
+ if (!f.good()) {
cerr << "file " << path << " not created\n";
exit(1);
}
}
-static void createMain(const string &path, const string &headerPath)
-{
+static void createMain(const string& path, const string& header_path) {
ofstream out(path);
- if (!out.good())
- {
+ if (!out.good()) {
cerr << "Main file " << path << " not created\n";
exit(1);
}
- out << "#include \"" << headerPath << "\"\n";
+ out << "#include \"" << header_path << "\"\n";
out.write(test_main, sizeof(test_main));
}
-int main(int argc, const char *argv[])
-{
+int main(int argc, const char* argv[]) {
cli::CommandLine::getParser()->parseCommandLine(argc, argv, false);
- std::string outputDir = cli::artifactDir;
- std::string artifactName = cli::artifactName;
+ std::string output_dir = cli::artifactDir;
+ std::string artifact_name = cli::artifactName;
Graph g;
fillGraph(g);
- nnc::CPPCodeGenerator cppCodeGenerator;
- cppCodeGenerator.run(&g);
+ nnc::CPPCodeGenerator cpp_code_generator;
+ cpp_code_generator.run(&g);
- string basePath = outputDir + "/" + artifactName;
+ string base_path = output_dir + "/" + artifact_name;
- string codePath = basePath + ".cpp";
- string headerPath = basePath + ".h";
- string mainPath = basePath + "_main.cpp";
+ string code_path = base_path + ".cpp";
+ string header_path = base_path + ".h";
+ string main_path = base_path + "_main.cpp";
- checkFileExists(codePath);
- checkFileExists(headerPath);
- checkFileExists(basePath + ".params");
+ checkFileExists(code_path);
+ checkFileExists(header_path);
+ checkFileExists(base_path + ".params");
- createMain(mainPath, artifactName + ".h");
+ createMain(main_path, artifact_name + ".h");
- string targetCompiler = "g++ -Wall --std=c++11";
+ string target_compiler = "g++ -Wall --std=c++11";
- string compilerCommand = targetCompiler + " -I" + outputDir + " " + mainPath + " " + codePath;
+ string compiler_command =
+ target_compiler + " -I" + output_dir + " " + main_path + " " + code_path;
// call compiler
- int res = system(compilerCommand.c_str());
+ int res = system(compiler_command.c_str());
- if (res == -1)
- {
+ if (res == -1) {
cerr << "failed to call compiler\n";
return 2;
}
- if (res != 0)
- {
- cerr << "compiler did not succeed with error code " << res << ": " << compilerCommand << "\n";
+ if (res != 0) {
+ cerr << "compiler did not succeed with error code " << res << ": " << compiler_command << "\n";
return 3;
}
return 0;
#include "gtest/gtest.h"
-template <class List>
+template <typename List>
-static inline void checkListShapeEq(List list, Shape shape, index_t volume)
-{
+static inline void checkListShapeEq(List list, Shape shape, index_t volume) {
ASSERT_EQ(static_cast<size_t>(shape.getDims()), list.size());
index_t idx = 0;
- for (auto el: list)
- {
+ for (auto el: list) {
ASSERT_EQ(shape[idx], el);
idx++;
}
ASSERT_EQ(shape.getNumElems(), volume);
}
-TEST(SOFT_BACKEND, shape_and_index)
-{
+TEST(SOFT_BACKEND, shape_and_index) {
auto list = {2,3,4};
index_t volume = std::accumulate(list.begin(), list.end(), 1, std::multiplies<index_t>());
Shape s1(list);
ASSERT_EQ(s3.getNumElems(), volume);
}
-TEST(SOFT_BACKEND, tensor)
-{
+TEST(SOFT_BACKEND, tensor) {
// test reshape
Tensor t1;
ASSERT_EQ(t1.getShape().getNumElems(), 0);
- const index_t t1Height = 2;
- const index_t t1Width = 4;
- t1.reshape(Shape{t1Height, t1Width});
- ASSERT_EQ(t1.getShape().getNumElems(), t1Height * t1Width);
+ const index_t tensor1_height = 2;
+ const index_t tensor1_width = 4;
+ t1.reshape(Shape{tensor1_height, tensor1_width});
+ ASSERT_EQ(t1.getShape().getNumElems(), tensor1_height * tensor1_width);
// test at functions
- float expectedSum = 0;
- for (index_t i = 0; i < t1Height; ++i)
- for (index_t j = 0; j < t1Width; ++j)
- {
+ float expected_sum = 0;
+ for (index_t i = 0; i < tensor1_height; ++i)
+ for (index_t j = 0; j < tensor1_width; ++j) {
index_t elem = (i + 1) * (j + 1);
- expectedSum += elem;
+ expected_sum += elem;
t1.at({i, j}) = elem;
}
float sum = 0;
- for (index_t i = 0; i < t1Height; ++i)
- for (index_t j = 0; j < t1Width; ++j)
- {
+ for (index_t i = 0; i < tensor1_height; ++i)
+ for (index_t j = 0; j < tensor1_width; ++j) {
sum += t1.at({i, j});
}
- ASSERT_EQ(sum, expectedSum);
+ ASSERT_EQ(sum, expected_sum);
// test construction with shape
- const index_t t2Height = 3;
- const index_t t2Width = 4;
- Tensor t2({t2Height, t2Width});
- ASSERT_EQ(t2.getShape().getNumElems(), t2Height * t2Width);
+ const index_t tensor2_height = 3;
+ const index_t tensor2_width = 4;
+ Tensor t2({tensor2_height, tensor2_width});
+ ASSERT_EQ(t2.getShape().getNumElems(), tensor2_height * tensor2_width);
// test unmanaged tensor
- const index_t t3Depth = 2;
- const index_t t3Height = 2;
- const index_t t3Width = 3;
+ const index_t tensor3_depth = 2;
+ const index_t tensor3_height = 2;
+ const index_t tensor3_width = 3;
std::vector<float> data({1.0, 2.0, 4.0});
- data.resize(t3Depth * t3Height * t3Width);
- float *dataPtr = data.data();
- Tensor t3(Shape({t3Depth, t3Height, t3Width}), dataPtr);
- ASSERT_EQ(t3.getShape().getNumElems(), t3Depth * t3Height * t3Width);
+ data.resize(tensor3_depth * tensor3_height * tensor3_width);
+ float* data_ptr = data.data();
+ Tensor t3(Shape({tensor3_depth, tensor3_height, tensor3_width}), data_ptr);
+ ASSERT_EQ(t3.getShape().getNumElems(), tensor3_depth * tensor3_height * tensor3_width);
sum = 0;
- for (index_t k = 0; k < t3Depth; ++k)
- for (index_t i = 0; i < t3Height; ++i)
- for (index_t j = 0; j < t3Width; ++j)
- {
+ for (index_t k = 0; k < tensor3_depth; ++k)
+ for (index_t i = 0; i < tensor3_height; ++i)
+ for (index_t j = 0; j < tensor3_width; ++j) {
sum += t3.at({k, i, j});
}
- ASSERT_EQ(sum, std::accumulate(dataPtr, dataPtr + t3.getShape().getNumElems(), 0.0f));
+ ASSERT_EQ(sum, std::accumulate(data_ptr, data_ptr + t3.getShape().getNumElems(), 0.0f));
// test tensor copy
- const index_t t4Width = 4;
- Tensor t4({t4Width});
+ const index_t tensor4_width = 4;
+ Tensor t4({tensor4_width});
t4 = t3;
- for (index_t k = 0; k < t3Depth; ++k)
- for (index_t i = 0; i < t3Height; ++i)
- for (index_t j = 0; j < t3Height; ++j)
- {
+ for (index_t k = 0; k < tensor3_depth; ++k)
+ for (index_t i = 0; i < tensor3_height; ++i)
+ for (index_t j = 0; j < tensor3_width; ++j) {
ASSERT_EQ(t3.at({k, i, j}), t4.at({k, i, j}));
}
}
/**
- * @brief Run selected operation, used to make code in tests more compact and fit getReferenceTensor format
+ * @brief Run the selected operation; used to make test code more compact and to fit the getReferenceTensor format
*/
-template <class Operation, class ...Args>
+template <typename Operation, typename ...Args>
-Tensor run(Operation op, const Args &...args) {
+Tensor run(Operation op, const Args&... args) {
Tensor output;
op(output, args...);
/**
- * @brief This function creates test graph, runs interpeter, specifies artifact operation and compares results
+ * @brief This function creates a test graph, runs the interpreter, specifies the artifact operation, and compares the results
*/
-template <class TestFunc, class ...Args>
+template <typename TestFunc, typename ...Args>
void createAndRunTestGraph(
function<mir::Operation*(mir::Graph&,
const std::vector<mir::IODescriptor>& inputs)> op_generator,
padding, padding, border);
};
-template <irOps::PoolOp::PoolingType poolT, class Func>
+template <irOps::PoolOp::PoolingType poolT, typename Func>
-static void genericPoolTest(Func test_func, const vector<irOps::PoolOp::BorderType> borders) {
+static void genericPoolTest(Func test_func, const vector<irOps::PoolOp::BorderType>& borders) {
// Iterate over window width, window height
// channels
using namespace nnc;
using namespace nnc::mir;
-static bool isFileExists(const string &path)
-{
+static bool isFileExists(const string& path) {
ifstream f(path);
return f.good();
}
-static void deleteFile(const string &path)
-{
+static void deleteFile(const string& path) {
int res = remove(path.c_str());
assert(!res && "failed to remove file");
(void)res;
}
-int removeRec(const char *fpath, const struct stat *sb, int typeflag, struct FTW *ftwbuf)
-{
+int removeRec(const char* fpath, const struct stat* /*sb*/,
+ int /*typeflag*/, struct FTW* /*ftwbuf*/) {
deleteFile(fpath);
return 0;
}
-static void deleteDir(const string &path)
-{
+static void deleteDir(const string& path) {
int res = nftw(path.c_str(), removeRec, 1, FTW_DEPTH | FTW_PHYS);
assert(!res && "failed to remove dir");
(void)res;
}
-static void checkOutputExists(const string &commonPath)
-{
- ASSERT_TRUE(isFileExists(commonPath + ".h"));
- ASSERT_TRUE(isFileExists(commonPath + ".cpp"));
- ASSERT_TRUE(isFileExists(commonPath + ".params"));
+static void checkOutputExists(const string& common_path) {
+ ASSERT_TRUE(isFileExists(common_path + ".h"));
+ ASSERT_TRUE(isFileExists(common_path + ".cpp"));
+ ASSERT_TRUE(isFileExists(common_path + ".params"));
}
-static void emptyFile(const string &path)
-{
+static void emptyFile(const string& path) {
ofstream of(path);
}
-TEST(Generator, check_generator_call)
-{
+TEST(Generator, check_generator_call) {
// assume here that the C++ and C code generators behave identically in terms of parameter checks
// test only the C++ generator
#define TEST_DIR "output_dir"
#define TEST_NAME "someName"
#define BASE_NAME TEST_DIR "/" TEST_NAME
- const char *argv[] = {"soft_backend_test",
+ const char* argv[] = {"soft_backend_test",
"-d", TEST_DIR,
"-o", TEST_NAME,
nullptr};
// test that generator creates output dir and files
if (isFileExists(TEST_DIR))
- {
deleteDir(TEST_DIR);
- }
assert(!isFileExists(TEST_DIR) && "remove output dir");
CPPCodeGenerator cpp_code_generator;
cpp_code_generator.run(&g);