#include "flatbuffers/flatbuffers.h"
#include <map>
+#include <vector>
#include <memory>
#include "nnc/core/IR/model/graph/graph.h"
// that correspond to operations having these tensors as output.
std::map<int, INode::Ref> opsForTensorsTheyOutput;
+ // These two methods prepare data for creating an IR node/operation.
+ std::vector<std::shared_ptr<IrTensor>> createOpParams(const Operator *op);
+ std::vector<INode::Ref> createOpInputs(const Operator *op);
+
std::shared_ptr<IrTensor> createTensor(const Tensor *t, const Buffer *b);
std::shared_ptr<IrTensor> convertTensorForConv(std::shared_ptr<IrTensor>);
};
// Visiting an OperatorCode directly is not supported yet.
void IrVisitor::visit(const OperatorCode *)
{
  throw std::runtime_error{"Not yet implemented"};
}
// Visiting a Buffer directly is not supported yet.
void IrVisitor::visit(const Buffer *)
{
  throw std::runtime_error{"Not yet implemented"};
}
+// Collects the Model IR nodes whose outputs feed operator "op".
+// Only inputs whose backing buffer carries no data (i.e. tensors produced at
+// runtime by another operation rather than constant data) yield a node
+// reference; constant inputs are turned into parameters by createOpParams.
+// Throws PluginException when a runtime input tensor has no producing node
+// recorded in opsForTensorsTheyOutput.
+std::vector<INode::Ref> IrVisitor::createOpInputs(const Operator *op)
+{
+  std::vector<INode::Ref> inputsForOp;
+
+  try
+  {
+    for (auto i : *(op->inputs()))
+    {
+      int bufferIdx = (*tensors)[i]->buffer();
+      if ((*buffers)[bufferIdx]->data() == nullptr)
+      {
+        // By this point every input for the operation "op" should have corresponding
+        // Model IR operations that output its inputs. This assumption is provided by the fact
+        // that TFLite format specifies all operations in the execution order.
+        inputsForOp.push_back(opsForTensorsTheyOutput.at(i));
+      }
+    }
+  }
+  catch (const std::out_of_range &e)
+  {
+    // map::at threw: a producer node is missing — surface it as a plugin error.
+    throw PluginException("Found a TFLite operator with an input tensor for which "
+                          "a corresponding Model IR node that outputs it was not created.");
+  }
+
+  return inputsForOp;
+}
+
+// Collects the constant parameters of operator "op": every input tensor whose
+// buffer carries data is materialized as an IrTensor. Convolution weight
+// tensors of rank 4 are additionally converted to HWCN layout (see TODO).
+// Inputs with empty buffers are runtime tensors and are handled by
+// createOpInputs instead.
+std::vector<std::shared_ptr<IrTensor>> IrVisitor::createOpParams(const Operator *op)
+{
+  std::vector<std::shared_ptr<IrTensor>> paramsForOp;
+
+  // The opcode is a property of the operator, not of an individual input,
+  // so look it up once instead of on every loop iteration.
+  unsigned int opcode = (*opcodes)[op->opcode_index()]->builtin_code();
+
+  for (auto i : *(op->inputs()))
+  {
+    const Tensor *t = (*tensors)[i];
+    const Buffer *b = (*buffers)[t->buffer()];
+    if (b->data() != nullptr)
+    {
+      std::shared_ptr<IrTensor> tensor = createTensor(t, b);
+
+      // Converting convolution weights tensor to HWCN format.
+      // TODO: Currently this is used by the interpreter, but by itself not necessary,
+      // so remove this when/if not needed.
+      if (opcode == BuiltinOperator_CONV_2D && t->shape()->size() == 4)
+      {
+        paramsForOp.push_back(convertTensorForConv(std::move(tensor)));
+      }
+      else
+      {
+        // Move instead of copy: avoids a redundant shared_ptr refcount bump.
+        paramsForOp.push_back(std::move(tensor));
+      }
+    }
+  }
+
+  return paramsForOp;
+}
std::shared_ptr<IrTensor> IrVisitor::createTensor(const Tensor *t, const Buffer *b)
{