using namespace nnc::mir;
-std::vector<TensorVariant> &NNInterpreter::var(size_t id) { return vars[id]; }
+std::vector<std::reference_wrapper<const TensorVariant>>
+NNInterpreter::getInputTensors(const Operation& op) {
+ std::vector<std::reference_wrapper<const TensorVariant>> tensors;
+ for (IODescriptor ir_tensor : op.getPrevNodes())
+ tensors.emplace_back(_opResults.at(ir_tensor.op->getId()).at(ir_tensor.index));
+ return tensors;
+}
+
+void NNInterpreter::setOutputTensors(const Operation& op, std::vector<TensorVariant>&& outputs) {
+ _opResults.emplace(op.getId(), std::move(outputs));
+}
-static void dumpIndex (Index ndx) {
+static void dumpIndex(Index ndx) {
for (int i = 0; i < ndx.rank(); i++) {
std::cout << (i ? "," : "(") << ndx.at(i);
}
}
// Debug tensor dumping; disabled by default. Change the condition to 1 to
// route DUMP(op, all) to NNInterpreter::dump.
#if(0)
#define DUMP(x, y) dump(x, (y))
#else
#define DUMP(x, y)
#endif
/// Prints metadata (dtype, element size, shape, element count) of the first
/// result tensor of 'op' to stdout. Used only via the DUMP macro.
void NNInterpreter::dump(Operation& op, bool all) {
  // TODO: in theory there could be several outputs from the given 'op'.
  TensorVariant tensor = _opResults.at(op.getId()).at(0);
  auto shape = tensor.getShape();
  std::cout << "Tensor '" <<
      (op.getNextNodes().size() ? op.getNextNodes()[0]->getName() : "output") <<
      "' DType = " << (int)tensor.getDataType() << ", ElementSize = " <<
      tensor.getElementSize() << ", Shape" << shape;
  std::cout << " ElementsNumber " << shape.numElements() << "\n";
  // Hook for dumping tensor contents under a debugger: flip 'do_it' at runtime.
  static bool do_it = false;
  if (do_it || all) {
  }
}
-void NNInterpreter::setInput(const std::string &name, const TensorVariant& t) {
-// TODO: our tests are failed with fe enable exception
-// feenableexcept(FE_INVALID | FE_OVERFLOW);
-// |
-// FE_DIVBYZERO |
-// FE_OVERFLOW |
-// FE_UNDERFLOW);
-// feenableexcept(FE_ALL_EXCEPT);
+void NNInterpreter::setInput(const std::string& name, const TensorVariant& t) {
+ _inputTensors.emplace(name, t);
+}
- data.emplace(name, t);
+TensorVariant NNInterpreter::getResult(IODescriptor tensor) {
+ return _opResults.at(tensor.op->getId()).at(tensor.index);
}
/// Resolves an input node by looking up the tensor registered via setInput().
/// Throws std::runtime_error when no data was bound to this input's name.
void NNInterpreter::visit(ops::InputOp& op) {
  auto it = _inputTensors.find(op.getName());
  if (it == _inputTensors.end())
    // Fixed: the node name quote was previously left unterminated.
    throw std::runtime_error("Can't find data for node \"" + op.getName() +
                             "\". Input data was not set correctly?");
  setOutputTensors(op, {it->second});
}
/// A constant node simply publishes its embedded value as its output.
void NNInterpreter::visit(ops::ConstantOp& op) {
  // Constants must not collide with externally supplied inputs.
  assert(_inputTensors.find(op.getName()) == _inputTensors.end());
  setOutputTensors(op, {op.getValue()});
}
/// Concatenates all input tensors along op.getAxis() via the Concat kernel.
void NNInterpreter::visit(ops::ConcatOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Concat<float>(inputs, op.getOutputShape(0), op.getAxis())();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Runs the Conv2D kernel on (input, kernel) — inputs[0] is the data tensor,
/// inputs[1] the convolution kernel.
void NNInterpreter::visit(ops::Conv2DOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Conv2D(inputs[0], inputs[1], op)();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, true);
}
/// Reshapes the single input tensor to the operation's output shape.
void NNInterpreter::visit(ops::ReshapeOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Reshape<float>(inputs[0], op.getOutputShape(0))();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Element-wise ReLU: out = max(in, 0).
void NNInterpreter::visit(ops::ReluOp& op) {
  auto inputs = getInputTensors(op);
  Tensor<float> input(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0),
                             [&input](const Index& id) { return std::max(input.at(id), 0.0f); })();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Element-wise logistic sigmoid: out = 1 / (1 + exp(-in)).
void NNInterpreter::visit(ops::SigmoidOp& op) {
  auto inputs = getInputTensors(op);
  Tensor<float> input(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0), [&input](const Index& id) {
    return 1.f / (1.f + std::exp(-input.at(id)));
  })();
  setOutputTensors(op, std::move(outputs));
}
/// Softmax along op.getAxis(), delegated to the Softmax kernel.
void NNInterpreter::visit(ops::SoftmaxOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Softmax(op.getInputShape(0), inputs[0], op.getAxis())();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Pooling (kind/params carried by 'op'), delegated to the Pool kernel.
void NNInterpreter::visit(ops::PoolOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Pool(inputs[0], op)();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Fully-connected layer: inputs[0] is the data, inputs[1] the weights.
void NNInterpreter::visit(ops::FullyConnectedOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = FullyConnected(inputs[0], inputs[1], op)();
  setOutputTensors(op, std::move(outputs));
}
/// GEMM with three operands (a, b, c), delegated to the Gemm kernel.
void NNInterpreter::visit(ops::GemmOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Gemm<float>(inputs[0], inputs[1], inputs[2], op)();
  setOutputTensors(op, std::move(outputs));
}
/// Capped ReLU: out = min(max(in, 0), cap).
void NNInterpreter::visit(ops::CappedReluOp& op) {
  auto inputs = getInputTensors(op);
  Tensor<float> input(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0), [&input, &op](const Index& id) {
    return std::min(std::max(input.at(id), 0.0f), op.getCap());
  })();
  setOutputTensors(op, std::move(outputs));
}
-void NNInterpreter::visit(ops::DepthwiseConv2DOp& op){
- auto input = op.getPrevNodes()[0];
- auto kernel = op.getPrevNodes()[1];
- auto input_tensor(var(input.op->getId())[input.index]);
- auto kernel_tensor(var(kernel.op->getId())[kernel.index]);
- var(op.getId()) = DepthwiseConv2D(input_tensor, kernel_tensor, op)();
+void NNInterpreter::visit(ops::DepthwiseConv2DOp& op) {
+ auto inputs = getInputTensors(op);
+ auto outputs = DepthwiseConv2D(inputs[0], inputs[1], op)();
+ setOutputTensors(op, std::move(outputs));
DUMP(op, true);
}
/// Adds a bias tensor (inputs[1]) to the data tensor (inputs[0]).
void NNInterpreter::visit(ops::BiasAddOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = BiasAdd(inputs[0], inputs[1])();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Batch normalization, delegated to the BatchNorm kernel.
void NNInterpreter::visit(ops::BatchNormOp& op) {
  auto inputs = getInputTensors(op);
  // TODO implement this
  auto outputs = BatchNorm<float>(inputs[0], op)();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Scales the data tensor (inputs[0]) by the scale tensor (inputs[1]).
void NNInterpreter::visit(ops::ScaleOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Scale(inputs[0], inputs[1])();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Slices the input: each output element is read from the input at the
/// output index shifted by op.getStarts().
void NNInterpreter::visit(ops::SliceOp& op) {
  auto inputs = getInputTensors(op);
  auto input = Tensor<float>(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0), [&input, &op](const Index& id) {
    Index idx = nnc::shift(id, op.getStarts());
    return input.at(idx);
  })();
  setOutputTensors(op, std::move(outputs));
}
/// Dropout, delegated to the Dropout kernel.
void NNInterpreter::visit(ops::DropoutOp& op) {
  auto inputs = getInputTensors(op);
  TensorVariant input(inputs[0]);
  // TODO implement this
  auto outputs = Dropout<float>(input, op)();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Element-wise hyperbolic tangent.
void NNInterpreter::visit(ops::TanhOp& op) {
  auto inputs = getInputTensors(op);
  Tensor<float> input(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0), [&input](const Index& id) {
    return std::tanh(input.at(id));
  })();
  setOutputTensors(op, std::move(outputs));
}
/// N-ary element-wise operation (add/mul/max/div) with optional broadcasting.
/// Accumulates left-to-right across all inputs at each index.
void NNInterpreter::visit(ops::ElementwiseOp& op) {
  auto inputs = getInputTensors(op);

  std::vector<Tensor<float>> ins;
  // Reserve space for tensor variants to avoid reference invalidation when pushing into vector
  std::vector<TensorVariant> broadcasted{};
  broadcasted.reserve(op.getNumInputs());
  for (auto in : inputs) {
    if (op.getBroadcast()) {
      broadcasted.emplace_back(in, op.getOutputShape(0));
      ins.emplace_back(broadcasted.back());
    } else {
      ins.emplace_back(in);
    }
  }
  float (* func)(float, float); // Another dirty hack
  switch (op.getOpType()) {
    case ops::ElementwiseOp::OpType::add:
      func = [](float a, float b) { return a + b; };
      break;
    case ops::ElementwiseOp::OpType::mul:
      func = [](float a, float b) { return a * b; };
      break;
    case ops::ElementwiseOp::OpType::max:
      func = [](float a, float b) { return std::max(a, b); };
      break;
    case ops::ElementwiseOp::OpType::div:
      func = [](float a, float b) { return a / b; };
      // BUG FIX: 'break' was missing here, so 'div' fell through into the
      // assert(false) below and aborted debug builds.
      break;
    default:
      assert(false && "Unsupported Optype");
  }
  auto outputs = Fill<float>(op.getOutputShape(0), [&func, &ins](const Index& id) {
    float acc = ins[0].at(id);
    for (size_t i = 1; i < ins.size(); i++)
      acc = func(acc, ins[i].at(id));
    return acc;
  })();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Transposed (de-)convolution: inputs[0] is the data, inputs[1] the kernel.
void NNInterpreter::visit(ops::DeConv2DOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = DeConv2D(inputs[0], inputs[1], op)();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// ELU: out = in when in >= 0, else alpha * (exp(in) - 1).
void NNInterpreter::visit(ops::EluOp& op) {
  auto inputs = getInputTensors(op);
  Tensor<float> input(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0), [&input, &op](const Index& id) {
    if (input.at(id) >= 0)
      return input.at(id);
    else
      return op.getAlpha() * (expf(input.at(id)) - 1);
  })();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Squeeze is just a special case of reshape to the output shape.
void NNInterpreter::visit(ops::SqueezeOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Reshape<float>(inputs[0], op.getOutputShape(0))();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Padding, delegated to the Pad kernel.
void NNInterpreter::visit(ops::PadOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Pad(inputs[0], op)();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Element-wise square root.
void NNInterpreter::visit(ops::SqrtOp& op) {
  auto inputs = getInputTensors(op);
  Tensor<float> input(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0), [&input](const Index id) {
    return std::sqrt(input.at(id));
  })();
  setOutputTensors(op, std::move(outputs));
}
/// Nearest-neighbor resize: each output index maps back to the input by
/// dividing each coordinate by its scale factor and flooring.
/// Only nearestNeighbor mode is supported (asserted).
void NNInterpreter::visit(ops::ResizeOp& op) {
  auto inputs = getInputTensors(op);
  Tensor<float> input(inputs[0]);
  assert(op.getMode() == ops::ResizeOp::ResizeMethod::nearestNeighbor);
  auto scales = op.getScales();
  auto outputs = Fill<float>(op.getOutputShape(0), [&scales, &input](const Index& id) {
    Index in_idx;
    // NOTE(review): resized to 4 regardless of input rank — assumes rank <= 4.
    in_idx.resize(4);
    for (int i = 0; i < input.getShape().rank(); i++) {
      in_idx.at(i) = static_cast<int>(floorf(id.at(i) / scales[i]));
    }
    return input.at(in_idx);
  })();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Mean reduction over op.getReductionDims(): sums with ReduceN, then divides
/// by the reduction area in a second pass. Only FuncType::mean is supported
/// (asserted).
void NNInterpreter::visit(ops::ReduceFOp& op) {
  auto inputs = getInputTensors(op);

  assert(op.getFuncType() == ops::ReduceFOp::FuncType::mean);
  // should always be an integer in a float
  const float reduction_area = static_cast<float>(op.getInputShape(0).numElements() /
                                                  op.getOutputShape(0).numElements());

  auto tmp = ReduceN<float>(op.getInputShape(0), op.getOutputShape(0), inputs[0],
                            op.getReductionDims(),
                            [](float running_sum, float item) { return running_sum + item; })();
  Tensor<float> out_t(tmp[0]); // for numerical stability
  auto outputs = Fill<float>(op.getOutputShape(0), [&out_t, reduction_area](const Index& id) {
    return out_t.at(id) / reduction_area;
  })();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Axis permutation, delegated to the Transpose kernel.
void NNInterpreter::visit(ops::TransposeOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Transpose(inputs[0], op)();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}
/// Gather: inputs[0] is the data tensor, inputs[1] the indices tensor.
void NNInterpreter::visit(ops::GatherOp& op) {
  auto inputs = getInputTensors(op);
  auto outputs = Gather(inputs[0], inputs[1], op)();
  setOutputTensors(op, std::move(outputs));
}
/// Leaky ReLU: out = in when in > 0, else alpha * in.
void NNInterpreter::visit(ops::LeakyReluOp& op) {
  auto inputs = getInputTensors(op);
  float alpha = op.getAlpha();
  Tensor<float> input(inputs[0]);
  auto outputs = Fill<float>(op.getOutputShape(0), [&input, alpha](const Index& id) {
    float val = input.at(id);
    return val > 0.0f ? val : val * alpha;
  })();
  setOutputTensors(op, std::move(outputs));
  DUMP(op, false);
}